From 8878f08b9b782debc908bf1b67f2159dc7d6d8db Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Tue, 29 Jul 2025 16:07:54 +0300
Subject: [PATCH 001/157] nvmeof: add nvmeof ability

create a prototype of the nvmeof csi-driver.
create/delete volume are implemented.

Signed-off-by: gadi-didi
---
 go.mod                                        |   5 +-
 go.sum                                        |   2 +
 .../nvmeof/controller/controllerserver.go     | 558 ++++++++++++++++++
 internal/nvmeof/nvmeof.go                     | 418 +++++++++++++
 4 files changed, 980 insertions(+), 3 deletions(-)
 create mode 100644 internal/nvmeof/controller/controllerserver.go
 create mode 100644 internal/nvmeof/nvmeof.go

diff --git a/go.mod b/go.mod
index fc7d895f347..042907acd0e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
 module github.com/ceph/ceph-csi
 
-go 1.24.0
-
-toolchain go1.24.2
+go 1.24.4
 
 // our own API
 replace github.com/ceph/ceph-csi/api => ./api
@@ -14,6 +12,7 @@ require (
 	github.com/aws/aws-sdk-go v1.55.8
 	github.com/aws/aws-sdk-go-v2/service/sts v1.37.0
 	github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
+	github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423
 	github.com/ceph/go-ceph v0.34.0
 	github.com/container-storage-interface/spec v1.11.0
 	github.com/csi-addons/kubernetes-csi-addons v0.13.0
diff --git a/go.sum b/go.sum
index bf76a430213..a2a49178de1 100644
--- a/go.sum
+++ b/go.sum
@@ -132,6 +132,8 @@ github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4r
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423 h1:Q7lE8B6IaLc3j7Af4uKS4Lgpc500g6IwIzpIVzzEbJM=
+github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423/go.mod h1:P+rCBJEUy2Yt6vOcTh7pyh9/g+o9pOZc7kyY6k/PS5U=
 github.com/ceph/go-ceph v0.34.0 h1:C45yU8VRl0Rg+/I0qw5bzT337HG6DL0yBQ0VR6QHv4o=
 github.com/ceph/go-ceph v0.34.0/go.mod h1:otRLwpVgM81lK5zdGYOfr4OELdeS97luDBE/PjXAB5o=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
new file mode 100644
index 00000000000..dbeb1e4c02b
--- /dev/null
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -0,0 +1,558 @@
+/*
+Copyright 2025 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
+	"github.com/ceph/ceph-csi/internal/nvmeof"
+	"github.com/ceph/ceph-csi/internal/rbd"
+	rbdutil "github.com/ceph/ceph-csi/internal/rbd"
+	rbddriver "github.com/ceph/ceph-csi/internal/rbd/driver"
+	"github.com/ceph/ceph-csi/internal/util/log"
+)
+
+// Server implements the CSI ControllerServer for the nvmeof driver and
+// delegates volume management to the RBD backend.
+type Server struct {
+	csi.UnimplementedControllerServer
+
+	// backendServer handles the RBD requests
+	backendServer *rbd.ControllerServer
+}
+
+// NewControllerServer initializes a controller server for the nvmeof CSI driver.
+func NewControllerServer(d *csicommon.CSIDriver) (*Server, error) {
+	return &Server{
+		backendServer: rbddriver.NewControllerServer(d),
+	}, nil
+}
+
+// ControllerGetCapabilities uses the RBD backendServer to return the
+// capabilities that were set in the Driver.Run() function.
+func (cs *Server) ControllerGetCapabilities(
+	ctx context.Context,
+	req *csi.ControllerGetCapabilitiesRequest,
+) (*csi.ControllerGetCapabilitiesResponse, error) {
+	return cs.backendServer.ControllerGetCapabilities(ctx, req)
+}
+
+// ValidateVolumeCapabilities checks whether the volume capabilities requested
+// are supported.
+func (cs *Server) ValidateVolumeCapabilities(
+	ctx context.Context,
+	req *csi.ValidateVolumeCapabilitiesRequest,
+) (*csi.ValidateVolumeCapabilitiesResponse, error) {
+	return cs.backendServer.ValidateVolumeCapabilities(ctx, req)
+}
+
+// CreateVolume creates a new RBD volume and exposes it through the NVMe-oF gateway.
+func (cs *Server) CreateVolume(
+	ctx context.Context,
+	req *csi.CreateVolumeRequest,
+) (*csi.CreateVolumeResponse, error) {
+	// TODO - check and modify the request to ensure it only has fields that NVMe-oF supports (like nfs does)
+	// TODO - in case of failure, we should clean up the created resources (RBD volume, subsystem, etc.)
+	// TODO - move all hardcoded strings to constants
+
+	// Step 0: Validate the request
+	if err := validateCreateVolumeRequest(req); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "request validation failed: %v", err)
+	}
+
+	// Step 1: Create the RBD volume through the backend. If it already exists, that is OK.
+	res, err := cs.backendServer.CreateVolume(ctx, req)
+	if err != nil {
+		log.ErrorLog(ctx, "failed to create RBD volume: %v", err)
+
+		return nil, err
+	}
+
+	backend := res.GetVolume()
+	volumeID := backend.GetVolumeId()
+	rbdImageName := backend.GetVolumeContext()["imageName"]
+	rbdPoolName := backend.GetVolumeContext()["pool"]
+
+	// Step 2: Set up the NVMe-oF resources
+	nvmeofData, err := createNVMeoFResources(ctx, req, rbdPoolName, rbdImageName)
+	if err != nil {
+		log.ErrorLog(ctx, "NVMe-oF resource setup failed for volumeID %s: %v", volumeID, err)
+		// TODO: Implement cleanup of RBD volume on NVMe-oF failure
+
+		return nil, status.Errorf(codes.Internal, "NVMe-oF setup failed: %v", err)
+	}
+
+	// Step 3: Populate the volume context for the NodeServer
+	populateVolumeContext(backend, nvmeofData)
+
+	// Step 4: Store the NVMe-oF metadata on the RBD image (needed later by DeleteVolume)
+	if err := cs.storeNVMeoFMetadata(ctx, req, volumeID, nvmeofData); err != nil {
+		return nil, err // Error already formatted with proper status code
+	}
+
+	return &csi.CreateVolumeResponse{Volume: backend}, nil
+}
+
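+// For reference, a CreateVolumeRequest that passes validateCreateVolumeRequest
+// below carries parameters like the following (typically supplied through a
+// StorageClass; all values here are illustrative placeholders, not defaults
+// shipped with the driver):
+//
+//	params := map[string]string{
+//		"subsystemNQN":         "nqn.2016-06.io.spdk:cnode1",
+//		"hostNQN":              "nqn.2014-08.org.nvmexpress:uuid:8e9f1a2b-3c4d-5e6f-7a8b-9c0d1e2f3a4b",
+//		"nvmeofGatewayAddress": "10.0.0.10", // gRPC management endpoint of the gateway
+//		"nvmeofGatewayPort":    "5500",
+//		"listenerIpAddress":    "10.0.0.11", // NVMe/TCP data-path address
+//		"listenerPort":         "4420",
+//		"listenerHostname":     "gw-node-1",
+//	}
+//
+// The remaining parameters (pool, clusterID, and so on) are validated by the
+// RBD backend.
+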
+// DeleteVolume removes the volume from the gateway and deletes the RBD image
+// from the backend.
+func (cs *Server) DeleteVolume(
+	ctx context.Context,
+	req *csi.DeleteVolumeRequest,
+) (*csi.DeleteVolumeResponse, error) {
+	volumeID := req.GetVolumeId()
+	if volumeID == "" {
+		return nil, status.Errorf(codes.InvalidArgument, "empty volume ID in request")
+	}
+	// Get the NVMe-oF metadata for cleanup
+	nvmeofData, err := cs.getNVMeoFMetadata(ctx, req, volumeID)
+	if err != nil {
+		log.DebugLog(ctx, "No NVMe-oF metadata found, skipping NVMe-oF cleanup: %v", err)
+	} else {
+		// Clean up NVMe-oF resources
+		if err := cleanupNVMeoFResources(ctx, req, nvmeofData); err != nil {
+			log.ErrorLog(ctx, "NVMe-oF cleanup failed, not deleting the RBD volume: %v", err)
+
+			return nil, status.Errorf(codes.Internal, "NVMe-oF cleanup failed: %v", err)
+		}
+	}
+	// Delete the RBD volume through the backend
+
+	return cs.backendServer.DeleteVolume(ctx, req)
+}
+
+// validateCreateVolumeRequest validates the nvmeof-specific parameters of the
+// incoming request. The rest of the parameters are validated by RBD.
+func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
+	// Validate required parameters
+	params := req.GetParameters()
+	requiredParams := []string{
+		"subsystemNQN", "hostNQN", "nvmeofGatewayAddress", "nvmeofGatewayPort",
+		"listenerIpAddress", "listenerPort", "listenerHostname",
+	}
+	for _, param := range requiredParams {
+		if params[param] == "" {
+			return fmt.Errorf("missing required parameter: %s", param)
+		}
+	}
+
+	return nil
+}
+
+// ensureSubsystem checks if the subsystem exists, and creates it if not.
+func ensureSubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient, subsystemNQN string) error {
+	exists, err := gateway.SubsystemExists(ctx, subsystemNQN)
+	if err != nil {
+		return err
+	}
+	if exists {
+		return nil
+	}
+	// Create the subsystem if it doesn't exist (controller decision)
+
+	return gateway.CreateSubsystem(ctx, subsystemNQN)
+}
+
+// cleanupEmptySubsystem deletes the subsystem if it is empty (has no namespaces).
+func cleanupEmptySubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient, subsystemNQN string,
+) error {
+	if subsystemNQN == "" {
+		return nil
+	}
+	exists, err := gateway.SubsystemExists(ctx, subsystemNQN)
+	if err != nil {
+		return err
+	}
+	if !exists { // If the subsystem was already deleted, this is not an error
+		log.DebugLog(ctx, "Subsystem %s does not exist", subsystemNQN)
+
+		return nil
+	}
+	// Check if the subsystem has any remaining namespaces
+	namespaces, err := gateway.ListNamespaces(ctx, subsystemNQN)
+	if err != nil {
+		return fmt.Errorf("failed to list namespaces: %w", err)
+	}
+	if len(namespaces.GetNamespaces()) != 0 {
+		log.DebugLog(ctx, "Subsystem %s still has %d namespaces, keeping",
+			subsystemNQN, len(namespaces.GetNamespaces()))
+
+		return nil
+	}
+	log.DebugLog(ctx, "Subsystem %s is empty, deleting", subsystemNQN)
+	err = gateway.DeleteSubsystem(ctx, subsystemNQN)
+	if err != nil {
+		return fmt.Errorf("failed to delete empty subsystem: %w", err)
+	}
+	log.DebugLog(ctx, "Empty subsystem %s deleted", subsystemNQN)
+
+	return nil
+}
+
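+// The two helpers above make subsystem handling idempotent across volumes
+// that share a subsystem. A minimal sketch of the lifecycle, assuming gw is a
+// connected *nvmeof.GatewayRpcClient and the NQN is a placeholder:
+//
+//	nqn := "nqn.2016-06.io.spdk:cnode1"
+//	_ = ensureSubsystem(ctx, gw, nqn)       // first volume: subsystem is created
+//	_ = ensureSubsystem(ctx, gw, nqn)       // second volume: already exists, no-op
+//	_ = cleanupEmptySubsystem(ctx, gw, nqn) // namespaces remain: subsystem is kept
+//	_ = cleanupEmptySubsystem(ctx, gw, nqn) // last namespace gone: subsystem deleted
+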
+// createNVMeoFResources sets up the NVMe-oF resources for the given RBD volume.
+// TODO - support multiple listeners.
+// TODO - roll back and clean up already-created resources if any step fails.
+func createNVMeoFResources(
+	ctx context.Context,
+	req *csi.CreateVolumeRequest,
+	rbdPoolName,
+	rbdImageName string,
+) (*nvmeof.NVMeoFVolumeData, error) {
+	// Step 1: Extract parameters (already validated)
+	params := req.GetParameters()
+	listenerPortStr := params["listenerPort"]
+	listenerPort, err := strconv.ParseUint(listenerPortStr, 10, 32)
+	if err != nil {
+		return nil, fmt.Errorf("invalid listenerPort %s: %w", listenerPortStr, err)
+	}
+	nvmeofGatewayPortStr := params["nvmeofGatewayPort"]
+	nvmeofGatewayPort, err := strconv.ParseUint(nvmeofGatewayPortStr, 10, 32)
+	if err != nil {
+		return nil, fmt.Errorf("invalid nvmeofGatewayPort %s: %w", nvmeofGatewayPortStr, err)
+	}
+	nvmeofData := &nvmeof.NVMeoFVolumeData{
+		SubsystemNQN: params["subsystemNQN"],
+		NamespaceID:  0, // will be set after namespace creation
+		HostNQN:      params["hostNQN"],
+		ListenerInfo: nvmeof.ListenerDetails{
+			GatewayAddress: nvmeof.GatewayAddress{
+				Address: params["listenerIpAddress"],
+				Port:    uint32(listenerPort),
+			},
+			Hostname: params["listenerHostname"],
+		},
+		GatewayManagementInfo: nvmeof.GatewayConfig{
+			Address: params["nvmeofGatewayAddress"],
+			Port:    uint32(nvmeofGatewayPort),
+		},
+	}
+
+	// Step 2: Connect to the gateway
+	config, err := getGatewayConfigFromRequest(params)
+	if err != nil {
+		return nil, err
+	}
+	gateway, err := connectGateway(ctx, config)
+	if err != nil {
+		return nil, fmt.Errorf("gateway connection failed: %w", err)
+	}
+	defer func() {
+		if closeErr := gateway.Destroy(); closeErr != nil {
+			log.ErrorLog(ctx, "Warning: failed to close gateway connection: %v", closeErr)
+		}
+	}()
+
+	// Step 3: Ensure the subsystem exists
+	if err := ensureSubsystem(ctx, gateway, nvmeofData.SubsystemNQN); err != nil {
+		return nil, fmt.Errorf("subsystem setup failed: %w", err)
+	}
+
+	// Step 4: Create the namespace
+	nsid, err := gateway.CreateNamespace(ctx, nvmeofData.SubsystemNQN, rbdPoolName, rbdImageName)
+	if err != nil {
+		return nil, fmt.Errorf("namespace creation failed: %w", err)
+	}
+	log.DebugLog(ctx, "Namespace created: %s/%s with NSID: %d", rbdPoolName, rbdImageName, nsid)
+	nvmeofData.NamespaceID = nsid
+
+	// Step 5: Create the listener
+	err = gateway.CreateListener(ctx, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)
+	if err != nil {
+		return nil, fmt.Errorf("listener creation failed: %w", err)
+	}
+	log.DebugLog(ctx, "Listener created for subsystem %s at %s", nvmeofData.SubsystemNQN,
+		nvmeofData.ListenerInfo)
+
+	// Step 6: Add the host to the subsystem
+	if err := gateway.AddHost(ctx, nvmeofData.SubsystemNQN, nvmeofData.HostNQN); err != nil {
+		return nil, fmt.Errorf("host addition failed: %w", err)
+	}
+	log.DebugLog(ctx, "Host added: %s to subsystem %s", nvmeofData.HostNQN, nvmeofData.SubsystemNQN)
+
+	return nvmeofData, nil
+}
+
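+// For orientation: once the resources above exist, an initiator on the node
+// could reach the namespace over NVMe/TCP with nvme-cli, roughly as follows
+// (illustrative values matching the parameters example above; the node-side
+// attach is not implemented in this prototype):
+//
+//	nvme connect -t tcp -a 10.0.0.11 -s 4420 \
+//	    -n nqn.2016-06.io.spdk:cnode1 \
+//	    -q nqn.2014-08.org.nvmexpress:uuid:8e9f1a2b-3c4d-5e6f-7a8b-9c0d1e2f3a4b
+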
+// cleanupNVMeoFResources cleans up the NVMe-oF resources associated with the
+// volume. This includes removing the host, listener, namespace, and
+// potentially the subsystem.
+func cleanupNVMeoFResources(
+	ctx context.Context,
+	req *csi.DeleteVolumeRequest,
+	nvmeofData *nvmeof.NVMeoFVolumeData,
+) error {
+	// Step 1: Connect to the gateway using the stored management address
+	gateway, err := connectGateway(ctx, &nvmeof.GatewayConfig{
+		Address: nvmeofData.GatewayManagementInfo.Address,
+		Port:    nvmeofData.GatewayManagementInfo.Port,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to connect to gateway for cleanup: %w", err)
+	}
+	defer func() {
+		if closeErr := gateway.Destroy(); closeErr != nil {
+			log.ErrorLog(ctx, "Warning: failed to close gateway connection: %v", closeErr)
+		}
+	}()
+
+	// TODO: consider checking up front whether the subsystem exists; if it
+	// does not, there is nothing to clean up. That would be simpler than
+	// inspecting error messages for "not found".
+
+	// Step 2: Remove the host from the subsystem
+	if err := gateway.RemoveHost(ctx, nvmeofData.SubsystemNQN, nvmeofData.HostNQN); err != nil {
+		return fmt.Errorf("failed to remove host %s from subsystem %s: %w",
+			nvmeofData.HostNQN, nvmeofData.SubsystemNQN, err)
+	}
+	log.DebugLog(ctx, "Host %s removed from subsystem %s", nvmeofData.HostNQN, nvmeofData.SubsystemNQN)
+
+	// Step 3: Delete the listener
+	err = gateway.DeleteListener(ctx, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)
+	if err != nil {
+		return fmt.Errorf("failed to delete listener for subsystem %s: %w", nvmeofData.SubsystemNQN, err)
+	}
+	log.DebugLog(ctx, "Listener deleted for subsystem %s at %s", nvmeofData.SubsystemNQN,
+		nvmeofData.ListenerInfo)
+
+	// Step 4: Delete the namespace
+	if err := gateway.DeleteNamespace(ctx, nvmeofData.SubsystemNQN, nvmeofData.NamespaceID); err != nil {
+		return fmt.Errorf("failed to delete namespace %d for subsystem %s: %w",
+			nvmeofData.NamespaceID, nvmeofData.SubsystemNQN, err)
+	}
+	log.DebugLog(ctx, "Namespace %d deleted for subsystem %s", nvmeofData.NamespaceID, nvmeofData.SubsystemNQN)
+
+	// Step 5: Clean up the subsystem if it is now empty
+	if err := cleanupEmptySubsystem(ctx, gateway, nvmeofData.SubsystemNQN); err != nil {
+		return fmt.Errorf("failed to cleanup empty subsystem %s: %w", nvmeofData.SubsystemNQN, err)
+	}
+	log.DebugLog(ctx, "NVMe-oF resources cleaned up for volume with ID %s", req.GetVolumeId())
+
+	return nil
+}
+
+const (
+	// NVMe-oF resource info.
+	mdSubsystemNQN = ".rbd.nvmeof.SubsystemNQN"
+	mdNamespaceID  = ".rbd.nvmeof.NamespaceID"
+	mdHostNQN      = ".rbd.nvmeof.HostNQN"
+
+	// Listener info.
+	mdListenerAddress  = ".rbd.nvmeof.ListenerAddress"
+	mdListenerPort     = ".rbd.nvmeof.ListenerPort"
+	mdListenerHostname = ".rbd.nvmeof.ListenerHostname"
+
+	// Gateway management info.
+	mdGatewayAddress = ".rbd.nvmeof.GatewayAddress"
+	mdGatewayPort    = ".rbd.nvmeof.GatewayPort"
+)
+
+// populateVolumeContext adds the NVMe-oF information to the volume context
+// for the NodeServer.
+func populateVolumeContext(volume *csi.Volume, data *nvmeof.NVMeoFVolumeData) { + if volume.VolumeContext == nil { + volume.VolumeContext = make(map[string]string) + } + listenerPortStr := strconv.FormatUint(uint64(data.ListenerInfo.Port), 10) + gatewayManagementInfoPortStr := strconv.FormatUint(uint64(data.GatewayManagementInfo.Port), 10) + + volume.VolumeContext[mdSubsystemNQN] = data.SubsystemNQN + volume.VolumeContext[mdNamespaceID] = strconv.FormatUint(uint64(data.NamespaceID), 10) + volume.VolumeContext[mdHostNQN] = data.HostNQN + volume.VolumeContext[mdListenerAddress] = data.ListenerInfo.Address + volume.VolumeContext[mdListenerPort] = listenerPortStr + volume.VolumeContext[mdListenerHostname] = data.ListenerInfo.Hostname + volume.VolumeContext[mdGatewayAddress] = data.GatewayManagementInfo.Address + volume.VolumeContext[mdGatewayPort] = gatewayManagementInfoPortStr +} + +// storeNVMeoFMetadata stores all NVMe-oF data in RBD volume metadata for cleanup operations. +func (cs *Server) storeNVMeoFMetadata( + ctx context.Context, + req *csi.CreateVolumeRequest, + volumeID string, + nvmeofData *nvmeof.NVMeoFVolumeData, +) error { + // Create RBD manager + mgr := rbdutil.NewManager(cs.backendServer.Driver.GetInstanceID(), nil, req.GetSecrets()) + defer mgr.Destroy(ctx) + + // Get RBD volume + rbdVol, err := mgr.GetVolumeByID(ctx, volumeID) + if err != nil { + return status.Errorf(codes.Aborted, "failed to find volume with ID %q: %v", volumeID, err) + } + defer rbdVol.Destroy(ctx) + + gatewayManagementInfoPortStr := strconv.FormatUint(uint64(nvmeofData.GatewayManagementInfo.Port), 10) + listenerInfoPortStr := strconv.FormatUint(uint64(nvmeofData.ListenerInfo.Port), 10) + + // Prepare all metadata entries + metadata := map[string]string{ + // NVMe-oF resource info + mdSubsystemNQN: nvmeofData.SubsystemNQN, + mdNamespaceID: strconv.FormatUint(uint64(nvmeofData.NamespaceID), 10), + mdHostNQN: nvmeofData.HostNQN, + + // Listener info + mdListenerAddress: nvmeofData.ListenerInfo.Address, + mdListenerPort: listenerInfoPortStr, + mdListenerHostname: nvmeofData.ListenerInfo.Hostname, + + // Gateway management info + mdGatewayAddress: nvmeofData.GatewayManagementInfo.Address, + mdGatewayPort: gatewayManagementInfoPortStr, + } + + // Store all metadata entries + for key, value := range metadata { + if value == "" { + log.WarningLog(ctx, "Skipping empty metadata value for key: %s", key) + + continue + } + + if err := rbdVol.SetMetadata(key, value); err != nil { + log.ErrorLog(ctx, "Failed to store %s metadata: %v", key, err) + + return status.Errorf(codes.Internal, "failed to store %s metadata: %v", key, err) + } + } + + log.DebugLog(ctx, "All NVMe-oF metadata stored successfully for volume: %s", volumeID) + + return nil +} + +// getNVMeoFMetadata retrieves all NVMe-oF data from RBD volume metadata. 
+func (cs *Server) getNVMeoFMetadata( + ctx context.Context, + req *csi.DeleteVolumeRequest, + volumeID string, +) (*nvmeof.NVMeoFVolumeData, error) { + // Create RBD manager + mgr := rbdutil.NewManager(cs.backendServer.Driver.GetInstanceID(), nil, req.GetSecrets()) + defer mgr.Destroy(ctx) + + // Get RBD volume + rbdVol, err := mgr.GetVolumeByID(ctx, volumeID) + if err != nil { + return nil, fmt.Errorf("failed to find volume with ID %q: %w", volumeID, err) + } + defer rbdVol.Destroy(ctx) + + // Retrieve all metadata including gateway management info + metadata := make(map[string]string) + + // Required metadata keys + requiredKeys := []string{ + mdSubsystemNQN, + mdNamespaceID, + mdHostNQN, + mdListenerAddress, + mdListenerPort, + mdListenerHostname, + mdGatewayAddress, + mdGatewayPort, + } + + // Retrieve all metadata values + for _, key := range requiredKeys { + value, err := rbdVol.GetMetadata(key) + if err != nil { + return nil, fmt.Errorf("failed to get %s: %w", key, err) + } + if value == "" { + return nil, fmt.Errorf("metadata %s is empty", key) + } + metadata[key] = value + } + + // Parse namespace ID + nsid, err := strconv.ParseUint(metadata[mdNamespaceID], 10, 32) + if err != nil { + return nil, fmt.Errorf("invalid namespace ID: %w", err) + } + + listenerInfoPort, err := strconv.ParseUint(metadata[mdListenerPort], 10, 32) + if err != nil { + return nil, fmt.Errorf("invalid listener port: %w", err) + } + gatewayPort, err := strconv.ParseUint(metadata[mdGatewayPort], 10, 32) + if err != nil { + return nil, fmt.Errorf("invalid gateway port: %w", err) + } + // Construct NVMe-oF volume data + nvmeofData := &nvmeof.NVMeoFVolumeData{ + SubsystemNQN: metadata[mdSubsystemNQN], + NamespaceID: uint32(nsid), + HostNQN: metadata[mdHostNQN], + ListenerInfo: nvmeof.ListenerDetails{ + GatewayAddress: nvmeof.GatewayAddress{ + Address: metadata[mdListenerAddress], + Port: uint32(listenerInfoPort), + }, + Hostname: metadata[mdListenerHostname], + }, + // Store gateway management info separately + GatewayManagementInfo: nvmeof.GatewayConfig{ + Address: metadata[mdGatewayAddress], + Port: uint32(gatewayPort), + }, + } + + return nvmeofData, nil +} + +// getGatewayConfigFromRequest extracts gateway configuration from request parameters. +func getGatewayConfigFromRequest(params map[string]string) (*nvmeof.GatewayConfig, error) { + address := params["nvmeofGatewayAddress"] + if address == "" { + return nil, errors.New("nvmeofGatewayAddress parameter is required") + } + + portStr := params["nvmeofGatewayPort"] + + if portStr == "" { + return nil, errors.New("nvmeofGatewayPort parameter is required") + } + + // Convert string to proper port type + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("invalid port %s: %w", portStr, err) + } + + return &nvmeof.GatewayConfig{ + Address: address, + Port: uint32(port), + }, nil +} + +// connectGateway creates and connects a gateway client for this operation. 
+func connectGateway(ctx context.Context, config *nvmeof.GatewayConfig) (*nvmeof.GatewayRpcClient, error) {
+	gateway, err := nvmeof.NewGatewayRpcClient(config)
+	if err != nil {
+		log.DebugLog(ctx, "failed to connect to management gateway: %s", config)
+
+		return nil, fmt.Errorf("failed to connect to management gateway %s: %w",
+			config, err)
+	}
+	log.DebugLog(ctx, "Connected to the gateway %s", config)
+
+	return gateway, nil
+}
diff --git a/internal/nvmeof/nvmeof.go b/internal/nvmeof/nvmeof.go
new file mode 100644
index 00000000000..e717c605484
--- /dev/null
+++ b/internal/nvmeof/nvmeof.go
@@ -0,0 +1,418 @@
+/*
+Copyright 2025 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nvmeof
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"math/big"
+	"syscall"
+
+	pb "github.com/ceph/ceph-nvmeof/lib/go/nvmeof"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/ceph/ceph-csi/internal/util/log"
+)
+
+// GatewayAddress holds the address and port of the NVMe-oF gateway.
+// This is used to connect to the gateway server.
+// It is also used to specify the address and port of listeners.
+type GatewayAddress struct {
+	Address string
+	Port    uint32
+}
+
+func (ga GatewayAddress) String() string {
+	return fmt.Sprintf("%s:%d", ga.Address, ga.Port)
+}
+
+// ListenerDetails holds the listener information for a subsystem.
+type ListenerDetails struct {
+	GatewayAddress
+	Hostname string
+}
+
+// GatewayConfig is an alias of GatewayAddress that holds the gateway client
+// configuration.
+type GatewayConfig = GatewayAddress
+
+// NVMeoFVolumeData holds the data required for an NVMe-oF volume.
+type NVMeoFVolumeData struct {
+	SubsystemNQN          string
+	NamespaceID           uint32
+	HostNQN               string
+	ListenerInfo          ListenerDetails
+	GatewayManagementInfo GatewayConfig
+}
+
+// GatewayRpcClient wraps the gRPC client and connection details.
+type GatewayRpcClient struct {
+	config    *GatewayConfig
+	conn      *grpc.ClientConn
+	client    pb.GatewayClient // protobuf client
+	maxSerial *big.Int         // Maximum allowed serial number for subsystems.
+}
+
+// TODO: Move this to a util package?
+const (
+	MaxAllowedSubsystemSerialNumber = 99999999999999
+	RandomNumberOffset              = 2 // skip reserved identifiers 0 and 1
+)
+
+// NewGatewayRpcClient creates a new gateway client.
+func NewGatewayRpcClient(config *GatewayConfig) (*GatewayRpcClient, error) {
+	client := &GatewayRpcClient{
+		config:    config,
+		maxSerial: big.NewInt(MaxAllowedSubsystemSerialNumber - RandomNumberOffset),
+	}
+
+	err := client.connect()
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect to gateway: %w", err)
+	}
+
+	return client, nil
+}
+
+// Destroy closes the gateway connection.
+func (c *GatewayRpcClient) Destroy() error {
+	var err error
+
+	// Close the connection if it exists
+	if c.conn != nil {
+		err = c.conn.Close()
+		c.conn = nil
+	}
+	// Clear the client reference
+	c.client = nil
+	// Clear the config to prevent reuse
+	c.config = nil
+
+	return err
+}
+
+// CreateNamespace creates a namespace in a subsystem.
+func (gw *GatewayRpcClient) CreateNamespace(ctx context.Context, subsystemNQN, poolName, imageName string,
+) (uint32, error) {
+	log.DebugLog(ctx, "Creating namespace for RBD %s/%s in subsystem %s",
+		poolName, imageName, subsystemNQN)
+
+	req := &pb.NamespaceAddReq{
+		SubsystemNqn: subsystemNQN,
+		RbdPoolName:  poolName,
+		RbdImageName: imageName,
+		BlockSize:    4096, // TODO: make this configurable?
+		// Nsid:              nil,
+		// Uuid:              nil,
+		// Anagrpid:          nil,
+		// CreateImage:       nil,
+		// Size:              nil,
+		// Force:             nil,
+		// NoAutoVisible:     nil, // TODO: if we use only one subsystem for all volumes, we probably want to set this to true
+		// TrashImage:        nil,
+		// DisableAutoResize: nil,
+		// ReadOnly:          nil,
+	}
+
+	resp, err := gw.client.NamespaceAdd(ctx, req)
+	if err != nil {
+		return 0, fmt.Errorf("failed to create namespace for %s/%s: %w", poolName, imageName, err)
+	}
+	if resp.GetStatus() != 0 {
+		return 0, fmt.Errorf("gateway NamespaceAdd returned error: %s", resp.GetErrorMessage())
+	}
+	log.DebugLog(ctx, "Namespace created with NSID: %d", resp.GetNsid())
+
+	return resp.GetNsid(), nil
+}
+
+// DeleteNamespace deletes a namespace from a subsystem.
+func (gw *GatewayRpcClient) DeleteNamespace(ctx context.Context, subsystemNQN string, namespaceID uint32) error {
+	log.DebugLog(ctx, "Deleting namespace %d from subsystem %s", namespaceID, subsystemNQN)
+
+	req := &pb.NamespaceDeleteReq{
+		SubsystemNqn: subsystemNQN,
+		Nsid:         namespaceID,
+	}
+
+	status, err := gw.client.NamespaceDelete(ctx, req)
+	if err != nil {
+		return fmt.Errorf("failed to delete namespace %d: %w", namespaceID, err)
+	}
+	if status.GetStatus() == 0 {
+		log.DebugLog(ctx, "Namespace deleted successfully: %d", namespaceID)
+
+		return nil
+	}
+	if status.GetStatus() == int32(syscall.ENOENT) {
+		log.DebugLog(ctx, "Namespace %d already deleted (not found)", namespaceID)
+
+		return nil // Namespace already deleted, no error
+	}
+
+	return fmt.Errorf("gateway NamespaceDelete returned error (status=%d): %s",
+		status.GetStatus(), status.GetErrorMessage())
+}
+
+// CreateSubsystem creates an NVMe-oF subsystem on the gateway.
+func (gw *GatewayRpcClient) CreateSubsystem(ctx context.Context, subsystemNQN string) error {
+	log.DebugLog(ctx, "Creating NVMe subsystem: %s on gateway %s",
+		subsystemNQN, gw.config)
+
+	serialNumber, err := gw.generateSerialNumber()
+	if err != nil {
+		return fmt.Errorf("failed to generate serial number: %w", err)
+	}
+
+	req := &pb.CreateSubsystemReq{
+		SubsystemNqn:  subsystemNQN,
+		SerialNumber:  serialNumber,     // randomly generated serial number
+		EnableHa:      true,             // enable HA (seems to be expected to always be true)
+		NoGroupAppend: proto.Bool(true), // do not append the gateway group name to the NQN
+		// MaxNamespaces: nil, // use gateway default
+		// DhchapKey:     nil, // no authentication
+		// KeyEncrypted:  nil, // no encryption
+	}
+
+	status, err := gw.client.CreateSubsystem(ctx, req)
+	if err != nil {
+		return fmt.Errorf("failed to create subsystem %s: %w", subsystemNQN, err)
+	}
+	if status.GetStatus() != 0 {
+		return fmt.Errorf("gateway CreateSubsystem returned error: %s", status.GetErrorMessage())
+	}
+
+	log.DebugLog(ctx, "Subsystem created successfully: %s", status.GetNqn())
+
+	return nil
+}
+
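+// A minimal usage sketch of the client so far (the gateway address, pool,
+// image, and NQN are illustrative placeholders):
+//
+//	gw, err := NewGatewayRpcClient(&GatewayConfig{Address: "10.0.0.10", Port: 5500})
+//	if err != nil {
+//		return err
+//	}
+//	defer gw.Destroy()
+//
+//	nqn := "nqn.2016-06.io.spdk:cnode1"
+//	if err := gw.CreateSubsystem(ctx, nqn); err != nil {
+//		return err
+//	}
+//	nsid, err := gw.CreateNamespace(ctx, nqn, "rbd-pool", "csi-vol-0001")
+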
+// DeleteSubsystem deletes an NVMe-oF subsystem.
+func (gw *GatewayRpcClient) DeleteSubsystem(ctx context.Context, subsystemNQN string) error {
+	log.DebugLog(ctx, "Deleting NVMe subsystem: %s", subsystemNQN)
+
+	req := &pb.DeleteSubsystemReq{
+		SubsystemNqn: subsystemNQN,
+	}
+
+	status, err := gw.client.DeleteSubsystem(ctx, req)
+	switch {
+	case err != nil:
+		return fmt.Errorf("failed to delete subsystem %s: %w", subsystemNQN, err)
+	case status.GetStatus() == int32(syscall.ENOENT):
+		// the subsystem was removed already
+		return nil
+	case status.GetStatus() != 0:
+		return fmt.Errorf("gateway DeleteSubsystem returned error: %s", status.GetErrorMessage())
+	}
+
+	log.DebugLog(ctx, "Subsystem deleted successfully: %s", subsystemNQN)
+
+	return nil
+}
+
+// SubsystemExists reports whether the given subsystem exists on the gateway.
+func (gw *GatewayRpcClient) SubsystemExists(ctx context.Context, subsystemNQN string) (bool, error) {
+	log.DebugLog(ctx, "Checking if subsystem %s exists on gateway %s", subsystemNQN, gw.config)
+
+	req := &pb.ListSubsystemsReq{
+		SubsystemNqn: nil,
+	}
+	resp, err := gw.client.ListSubsystems(ctx, req)
+	if err != nil {
+		return false, fmt.Errorf("failed to list subsystems: %w", err)
+	}
+	if resp.GetStatus() != 0 {
+		return false, fmt.Errorf("gateway ListSubsystems returned error (status=%d): %s",
+			resp.GetStatus(), resp.GetErrorMessage())
+	}
+
+	for _, sub := range resp.GetSubsystems() {
+		if sub.GetNqn() == subsystemNQN {
+			log.DebugLog(ctx, "Subsystem %s exists", subsystemNQN)
+
+			return true, nil
+		}
+	}
+	log.DebugLog(ctx, "Subsystem %s does not exist", subsystemNQN)
+
+	return false, nil
+}
+
+// AddHost adds a host to a subsystem (allows access).
+func (gw *GatewayRpcClient) AddHost(ctx context.Context, subsystemNQN, hostNQN string) error {
+	log.DebugLog(ctx, "Adding host %s to subsystem %s on gateway %s",
+		hostNQN, subsystemNQN, gw.config)
+
+	req := &pb.AddHostReq{
+		SubsystemNqn: subsystemNQN,
+		HostNqn:      hostNQN,
+		// Psk:          nil, // No pre-shared key
+		// DhchapKey:    nil, // No DH-CHAP authentication
+		// PskEncrypted: nil, // No PSK encryption
+		// KeyEncrypted: nil, // No key encryption
+	}
+
+	resp, err := gw.client.AddHost(ctx, req)
+	if err != nil {
+		return fmt.Errorf("failed to add host %s to subsystem %s: %w", hostNQN, subsystemNQN, err)
+	}
+	if resp.GetStatus() != 0 {
+		return fmt.Errorf("gateway AddHost returned error (status=%d): %s", resp.GetStatus(), resp.GetErrorMessage())
+	}
+	log.DebugLog(ctx, "Host added successfully: %s to subsystem %s", hostNQN, subsystemNQN)
+
+	return nil
+}
+
+// CreateListener adds a listener for a subsystem on the gateway.
+func (gw *GatewayRpcClient) CreateListener(ctx context.Context, subsystemNQN string, listenerInfo ListenerDetails,
+) error {
+	log.DebugLog(ctx, "Adding listener %s to subsystem %s", listenerInfo.Address, subsystemNQN)
+	adrfam := pb.AddressFamily_ipv4
+	req := &pb.CreateListenerReq{
+		Nqn:      subsystemNQN,
+		HostName: listenerInfo.Hostname,
+		Traddr:   listenerInfo.Address,
+		Trsvcid:  &listenerInfo.Port,
+		Adrfam:   &adrfam, // assuming IPv4 // TODO - make it configurable
+		// Secure:         false, // assuming no security for now // TODO - make it configurable?
+		// VerifyHostName: false, // assuming no hostname verification for now // TODO - make it configurable
+	}
+
+	resp, err := gw.client.CreateListener(ctx, req)
+	if err != nil {
+		return fmt.Errorf("failed to add listener %s to subsystem %s: %w", listenerInfo.Address, subsystemNQN, err)
+	}
+	if resp.GetStatus() != 0 {
+		return fmt.Errorf("gateway CreateListener returned error (status=%d): %s", resp.GetStatus(), resp.GetErrorMessage())
+	}
+
+	log.DebugLog(ctx, "Listener added successfully: %s to subsystem %s", listenerInfo.Address, subsystemNQN)
+
+	return nil
+}
+
+// DeleteListener removes a listener from a subsystem.
+func (gw *GatewayRpcClient) DeleteListener(ctx context.Context, subsystemNQN string, listenerInfo ListenerDetails,
+) error {
+	log.DebugLog(ctx, "Deleting listener %s from subsystem %s", listenerInfo.Address, subsystemNQN)
+	adrfam := pb.AddressFamily_ipv4
+	// Force is needed because without it the gateway returns "Can't verify
+	// there are no active connections for this address" instead of
+	// "Listener not found".
+	force := true
+	req := &pb.DeleteListenerReq{
+		Nqn:      subsystemNQN,
+		HostName: listenerInfo.Hostname,
+		Traddr:   listenerInfo.Address,
+		Trsvcid:  &listenerInfo.Port,
+		Adrfam:   &adrfam, // assuming IPv4 // TODO - make it configurable
+		Force:    &force,
+	}
+
+	resp, err := gw.client.DeleteListener(ctx, req)
+	if err != nil {
+		return fmt.Errorf("failed to delete listener %s from subsystem %s: %w", listenerInfo.Address, subsystemNQN, err)
+	}
+	if resp.GetStatus() == 0 {
+		log.DebugLog(ctx, "Listener deleted successfully: %s from subsystem %s", listenerInfo.Address, subsystemNQN)
+
+		return nil
+	}
+	if resp.GetStatus() == int32(syscall.ENOENT) {
+		log.DebugLog(ctx, "Listener %s already deleted from subsystem %s (not found)", listenerInfo.Address, subsystemNQN)
+
+		return nil // Listener already deleted, no error
+	}
+
+	return fmt.Errorf("gateway DeleteListener returned error (status=%d): %s", resp.GetStatus(), resp.GetErrorMessage())
+}
+
+// RemoveHost removes a host from a subsystem.
+func (gw *GatewayRpcClient) RemoveHost(ctx context.Context, subsystemNQN, hostNQN string) error {
+	log.DebugLog(ctx, "Removing host %s from subsystem %s", hostNQN, subsystemNQN)
+
+	req := &pb.RemoveHostReq{
+		SubsystemNqn: subsystemNQN,
+		HostNqn:      hostNQN,
+	}
+
+	resp, err := gw.client.RemoveHost(ctx, req)
+	if err != nil {
+		return fmt.Errorf("failed to remove host %s from subsystem %s: %w", hostNQN, subsystemNQN, err)
+	}
+	if resp.GetStatus() == 0 {
+		log.DebugLog(ctx, "Host %s removed successfully from subsystem %s", hostNQN, subsystemNQN)
+
+		return nil
+	}
+	if resp.GetStatus() == int32(syscall.ENOENT) {
+		log.DebugLog(ctx, "Host %s already removed from subsystem %s (not found)", hostNQN, subsystemNQN)
+
+		return nil
+	}
+
+	return fmt.Errorf("gateway RemoveHost returned error (status=%d): %s", resp.GetStatus(), resp.GetErrorMessage())
+}
+
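+// Note the ENOENT handling above: DeleteListener and RemoveHost (like
+// DeleteNamespace) treat "not found" as success, so the teardown helpers are
+// idempotent and a partially failed cleanup can simply be retried. A sketch
+// with placeholder names:
+//
+//	err := gw.DeleteListener(ctx, nqn, listener) // first call deletes the listener
+//	err = gw.DeleteListener(ctx, nqn, listener)  // retry: gateway reports ENOENT, returns nil
+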
+// ListNamespaces lists the namespaces in a subsystem.
+func (gw *GatewayRpcClient) ListNamespaces(ctx context.Context, subsystemNQN string) (*pb.NamespacesInfo, error) {
+	log.DebugLog(ctx, "Listing namespaces in subsystem %s", subsystemNQN)
+
+	req := &pb.ListNamespacesReq{
+		Subsystem: subsystemNQN,
+	}
+
+	resp, err := gw.client.ListNamespaces(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list namespaces in subsystem %s: %w", subsystemNQN, err)
+	}
+	if resp.GetStatus() != 0 {
+		return nil, fmt.Errorf("gateway ListNamespaces returned error: %s", resp.GetErrorMessage())
+	}
+
+	log.DebugLog(ctx, "Listed namespaces in subsystem %s successfully", subsystemNQN)
+
+	return resp, nil
+}
+
+// connect establishes the connection to the Gateway gRPC server.
+func (c *GatewayRpcClient) connect() error {
+	// Create the connection using the new gRPC API
+	conn, err := grpc.NewClient(c.config.String(),
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+	)
+	if err != nil {
+		return fmt.Errorf("failed to create Gateway gRPC client: %w", err)
+	}
+	c.conn = conn
+	c.client = pb.NewGatewayClient(conn)
+
+	return nil
+}
+
+// generateSerialNumber returns a random serial number for a new subsystem.
+// TODO - maybe move to a util-nvmeof package?
+func (c *GatewayRpcClient) generateSerialNumber() (string, error) {
+	n, err := rand.Int(rand.Reader, c.maxSerial)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate random serial: %w", err)
+	}
+
+	serial := n.Int64() + RandomNumberOffset
+
+	return fmt.Sprintf("Ceph%d", serial), nil
+}

From d31426723b35f6cc1f899d538207d13659f194ea Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Tue, 29 Jul 2025 16:08:49 +0300
Subject: [PATCH 002/157] nvmeof: tests: add nvmeof tests

nvmeof tests were added: create/delete subsystem, listener and host.

Signed-off-by: gadi-didi
---
 internal/nvmeof/tests/Dockerfile.nvmeof-test |  42 +++++
 internal/nvmeof/tests/nvmeof_test.go         | 168 +++++++++++++++++++
 2 files changed, 210 insertions(+)
 create mode 100644 internal/nvmeof/tests/Dockerfile.nvmeof-test
 create mode 100644 internal/nvmeof/tests/nvmeof_test.go

diff --git a/internal/nvmeof/tests/Dockerfile.nvmeof-test b/internal/nvmeof/tests/Dockerfile.nvmeof-test
new file mode 100644
index 00000000000..f4012003482
--- /dev/null
+++ b/internal/nvmeof/tests/Dockerfile.nvmeof-test
@@ -0,0 +1,42 @@
+# Dockerfile.nvmeof-test
+FROM golang:1.24-alpine
+
+# Install required packages
+RUN apk add --no-cache \
+    git \
+    make \
+    gcc \
+    musl-dev \
+    linux-headers \
+    netcat-openbsd
+
+# Set working directory
+WORKDIR /app
+
+# Copy all source code
+COPY . .
+
+COPY <
Date: Tue, 29 Jul 2025 16:12:02 +0300
Subject: [PATCH 003/157] ci: add nvmeof as option

added an "nvmeof" tag option for commits/PRs due to the new csi driver
(nvmeof-csi)

Signed-off-by: gadi-didi
---
 .commitlintrc.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.commitlintrc.yml b/.commitlintrc.yml
index f285812d793..9bac849ea03 100644
--- a/.commitlintrc.yml
+++ b/.commitlintrc.yml
@@ -44,3 +44,4 @@ rules:
       - revert
       - util
       - nfs
+      - nvmeof

From 147ba56d1782d4357ba713b1a084117c4f540971 Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Wed, 13 Aug 2025 09:31:47 +0300
Subject: [PATCH 004/157] nvmeof: added the upstream protobuf files to the
 vendor folder by running "go mod vendor"

Signed-off-by: gadi-didi
---
 .../ceph/ceph-nvmeof/lib/go/nvmeof/LICENSE    |   63 +
 .../ceph/ceph-nvmeof/lib/go/nvmeof/doc.go     |   12 +
 .../ceph-nvmeof/lib/go/nvmeof/gateway.pb.go   | 6916 +++++++++++++++++
 .../lib/go/nvmeof/gateway_grpc.pb.go          | 1412 ++++
 vendor/modules.txt                            |    3 +
 5 files changed, 8406 insertions(+)
 create mode 100644 vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/LICENSE
 create mode 100644 vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/doc.go
 create mode 100644 vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway.pb.go
 create mode 100644 vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway_grpc.pb.go

diff --git a/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/LICENSE b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/LICENSE
new file mode 100644
index 00000000000..bff9389b118
--- /dev/null
+++ b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/LICENSE
@@ -0,0 +1,63 @@
+GNU LESSER GENERAL PUBLIC LICENSE
+Version 3, 29 June 2007
+
+Copyright (C) 2007 Free Software Foundation, Inc.
+
+Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
+
+This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below.
+
+0. Additional Definitions.
+
+As used herein, “this License” refers to version 3 of the GNU Lesser General Public License, and the “GNU GPL” refers to version 3 of the GNU General Public License.
+
+“The Library” refers to a covered work governed by this License, other than an Application or a Combined Work as defined below.
+
+An “Application” is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library.
+
+A “Combined Work” is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the “Linked Version”.
+
+The “Minimal Corresponding Source” for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version.
+
+The “Corresponding Application Code” for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work.
+
+1. Exception to Section 3 of the GNU GPL.
+
+You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL.
+
+2. 
Conveying Modified Versions. + +If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: + +a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or +b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. +3. Object Code Incorporating Material from Library Header Files. + +The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: + +a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. +b) Accompany the object code with a copy of the GNU GPL and this license document. +4. Combined Works. + +You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: + +a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. +b) Accompany the Combined Work with a copy of the GNU GPL and this license document. +c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. +d) Do one of the following: +0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. +1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. +e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) +5. 
Combined Libraries. + +You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: + +a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. +b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. +6. Revised Versions of the GNU Lesser General Public License. + +The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. + +If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. diff --git a/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/doc.go b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/doc.go new file mode 100644 index 00000000000..dc5743507a3 --- /dev/null +++ b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/doc.go @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2025 International Business Machines + * All rights reserved. + * + * SPDX-License-Identifier: MIT + * + * Authors: ndevos@ibm.com + */ + +package nvmeof + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative --proto_path=../../../control/proto ../../../control/proto/gateway.proto diff --git a/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway.pb.go b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway.pb.go new file mode 100644 index 00000000000..e500313cd69 --- /dev/null +++ b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway.pb.go @@ -0,0 +1,6916 @@ +// +// Copyright (c) 2021 International Business Machines +// All rights reserved. +// +// SPDX-License-Identifier: MIT +// +// Authors: anita.shekar@ibm.com, sandy.kaur@ibm.com +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v3.21.12 +// source: gateway.proto + +package nvmeof + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AddressFamily int32 + +const ( + AddressFamily_ipv4 AddressFamily = 0 + AddressFamily_ipv6 AddressFamily = 1 +) + +// Enum value maps for AddressFamily. +var ( + AddressFamily_name = map[int32]string{ + 0: "ipv4", + 1: "ipv6", + } + AddressFamily_value = map[string]int32{ + "ipv4": 0, + "ipv6": 1, + } +) + +func (x AddressFamily) Enum() *AddressFamily { + p := new(AddressFamily) + *p = x + return p +} + +func (x AddressFamily) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AddressFamily) Descriptor() protoreflect.EnumDescriptor { + return file_gateway_proto_enumTypes[0].Descriptor() +} + +func (AddressFamily) Type() protoreflect.EnumType { + return &file_gateway_proto_enumTypes[0] +} + +func (x AddressFamily) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AddressFamily.Descriptor instead. +func (AddressFamily) EnumDescriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{0} +} + +type LogLevel int32 + +const ( + LogLevel_ERROR LogLevel = 0 + LogLevel_WARNING LogLevel = 1 + LogLevel_NOTICE LogLevel = 2 + LogLevel_INFO LogLevel = 3 + LogLevel_DEBUG LogLevel = 4 +) + +// Enum value maps for LogLevel. +var ( + LogLevel_name = map[int32]string{ + 0: "ERROR", + 1: "WARNING", + 2: "NOTICE", + 3: "INFO", + 4: "DEBUG", + } + LogLevel_value = map[string]int32{ + "ERROR": 0, + "WARNING": 1, + "NOTICE": 2, + "INFO": 3, + "DEBUG": 4, + } +) + +func (x LogLevel) Enum() *LogLevel { + p := new(LogLevel) + *p = x + return p +} + +func (x LogLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LogLevel) Descriptor() protoreflect.EnumDescriptor { + return file_gateway_proto_enumTypes[1].Descriptor() +} + +func (LogLevel) Type() protoreflect.EnumType { + return &file_gateway_proto_enumTypes[1] +} + +func (x LogLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LogLevel.Descriptor instead. +func (LogLevel) EnumDescriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{1} +} + +type GwLogLevel int32 + +const ( + GwLogLevel_notset GwLogLevel = 0 + GwLogLevel_debug GwLogLevel = 10 + GwLogLevel_info GwLogLevel = 20 + GwLogLevel_warning GwLogLevel = 30 + GwLogLevel_error GwLogLevel = 40 + GwLogLevel_critical GwLogLevel = 50 +) + +// Enum value maps for GwLogLevel. +var ( + GwLogLevel_name = map[int32]string{ + 0: "notset", + 10: "debug", + 20: "info", + 30: "warning", + 40: "error", + 50: "critical", + } + GwLogLevel_value = map[string]int32{ + "notset": 0, + "debug": 10, + "info": 20, + "warning": 30, + "error": 40, + "critical": 50, + } +) + +func (x GwLogLevel) Enum() *GwLogLevel { + p := new(GwLogLevel) + *p = x + return p +} + +func (x GwLogLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GwLogLevel) Descriptor() protoreflect.EnumDescriptor { + return file_gateway_proto_enumTypes[2].Descriptor() +} + +func (GwLogLevel) Type() protoreflect.EnumType { + return &file_gateway_proto_enumTypes[2] +} + +func (x GwLogLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GwLogLevel.Descriptor instead. 
+func (GwLogLevel) EnumDescriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{2} +} + +// From https://nvmexpress.org/wp-content/uploads/NVM-Express-1_4-2019.06.10-Ratified.pdf page 138 +// Asymmetric Namespace Access state for all namespaces in this ANA +// Group when accessed through this controller. +// Value Description Reference +// 01h ANA Optimized state 8.20.3.1 +// 02h ANA Non-Optimized state 8.20.3.2 +// 03h ANA Inaccessible state 8.20.3.3 +// 04h ANA Persistent Loss state 8.20.3.4 +// 0Fh ANA Change state 8.20.3.5 +// All others Reserved +type AnaState int32 + +const ( + AnaState_UNSET AnaState = 0 + AnaState_OPTIMIZED AnaState = 1 + AnaState_NON_OPTIMIZED AnaState = 2 + AnaState_INACCESSIBLE AnaState = 3 +) + +// Enum value maps for AnaState. +var ( + AnaState_name = map[int32]string{ + 0: "UNSET", + 1: "OPTIMIZED", + 2: "NON_OPTIMIZED", + 3: "INACCESSIBLE", + } + AnaState_value = map[string]int32{ + "UNSET": 0, + "OPTIMIZED": 1, + "NON_OPTIMIZED": 2, + "INACCESSIBLE": 3, + } +) + +func (x AnaState) Enum() *AnaState { + p := new(AnaState) + *p = x + return p +} + +func (x AnaState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AnaState) Descriptor() protoreflect.EnumDescriptor { + return file_gateway_proto_enumTypes[3].Descriptor() +} + +func (AnaState) Type() protoreflect.EnumType { + return &file_gateway_proto_enumTypes[3] +} + +func (x AnaState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AnaState.Descriptor instead. +func (AnaState) EnumDescriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{3} +} + +type NamespaceAddReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RbdPoolName string `protobuf:"bytes,1,opt,name=rbd_pool_name,json=rbdPoolName,proto3" json:"rbd_pool_name,omitempty"` + RbdImageName string `protobuf:"bytes,2,opt,name=rbd_image_name,json=rbdImageName,proto3" json:"rbd_image_name,omitempty"` + SubsystemNqn string `protobuf:"bytes,3,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid *uint32 `protobuf:"varint,4,opt,name=nsid,proto3,oneof" json:"nsid,omitempty"` + BlockSize uint32 `protobuf:"varint,5,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` + Uuid *string `protobuf:"bytes,6,opt,name=uuid,proto3,oneof" json:"uuid,omitempty"` + Anagrpid *int32 `protobuf:"varint,7,opt,name=anagrpid,proto3,oneof" json:"anagrpid,omitempty"` + CreateImage *bool `protobuf:"varint,8,opt,name=create_image,json=createImage,proto3,oneof" json:"create_image,omitempty"` + Size *uint64 `protobuf:"varint,9,opt,name=size,proto3,oneof" json:"size,omitempty"` + Force *bool `protobuf:"varint,10,opt,name=force,proto3,oneof" json:"force,omitempty"` + NoAutoVisible *bool `protobuf:"varint,11,opt,name=no_auto_visible,json=noAutoVisible,proto3,oneof" json:"no_auto_visible,omitempty"` + TrashImage *bool `protobuf:"varint,12,opt,name=trash_image,json=trashImage,proto3,oneof" json:"trash_image,omitempty"` + DisableAutoResize *bool `protobuf:"varint,13,opt,name=disable_auto_resize,json=disableAutoResize,proto3,oneof" json:"disable_auto_resize,omitempty"` + ReadOnly *bool `protobuf:"varint,14,opt,name=read_only,json=readOnly,proto3,oneof" json:"read_only,omitempty"` +} + +func (x *NamespaceAddReq) Reset() { + *x = NamespaceAddReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[0] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceAddReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceAddReq) ProtoMessage() {} + +func (x *NamespaceAddReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceAddReq.ProtoReflect.Descriptor instead. +func (*NamespaceAddReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{0} +} + +func (x *NamespaceAddReq) GetRbdPoolName() string { + if x != nil { + return x.RbdPoolName + } + return "" +} + +func (x *NamespaceAddReq) GetRbdImageName() string { + if x != nil { + return x.RbdImageName + } + return "" +} + +func (x *NamespaceAddReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceAddReq) GetNsid() uint32 { + if x != nil && x.Nsid != nil { + return *x.Nsid + } + return 0 +} + +func (x *NamespaceAddReq) GetBlockSize() uint32 { + if x != nil { + return x.BlockSize + } + return 0 +} + +func (x *NamespaceAddReq) GetUuid() string { + if x != nil && x.Uuid != nil { + return *x.Uuid + } + return "" +} + +func (x *NamespaceAddReq) GetAnagrpid() int32 { + if x != nil && x.Anagrpid != nil { + return *x.Anagrpid + } + return 0 +} + +func (x *NamespaceAddReq) GetCreateImage() bool { + if x != nil && x.CreateImage != nil { + return *x.CreateImage + } + return false +} + +func (x *NamespaceAddReq) GetSize() uint64 { + if x != nil && x.Size != nil { + return *x.Size + } + return 0 +} + +func (x *NamespaceAddReq) GetForce() bool { + if x != nil && x.Force != nil { + return *x.Force + } + return false +} + +func (x *NamespaceAddReq) GetNoAutoVisible() bool { + if x != nil && x.NoAutoVisible != nil { + return *x.NoAutoVisible + } + return false +} + +func (x *NamespaceAddReq) GetTrashImage() bool { + if x != nil && x.TrashImage != nil { + return *x.TrashImage + } + return false +} + +func (x *NamespaceAddReq) GetDisableAutoResize() bool { + if x != nil && x.DisableAutoResize != nil { + return *x.DisableAutoResize + } + return false +} + +func (x *NamespaceAddReq) GetReadOnly() bool { + if x != nil && x.ReadOnly != nil { + return *x.ReadOnly + } + return false +} + +type NamespaceResizeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + OBSOLETEUuid *string `protobuf:"bytes,3,opt,name=OBSOLETE_uuid,json=OBSOLETEUuid,proto3,oneof" json:"OBSOLETE_uuid,omitempty"` + NewSize uint64 `protobuf:"varint,4,opt,name=new_size,json=newSize,proto3" json:"new_size,omitempty"` +} + +func (x *NamespaceResizeReq) Reset() { + *x = NamespaceResizeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceResizeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceResizeReq) ProtoMessage() {} + +func (x *NamespaceResizeReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[1] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceResizeReq.ProtoReflect.Descriptor instead. +func (*NamespaceResizeReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{1} +} + +func (x *NamespaceResizeReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceResizeReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceResizeReq) GetOBSOLETEUuid() string { + if x != nil && x.OBSOLETEUuid != nil { + return *x.OBSOLETEUuid + } + return "" +} + +func (x *NamespaceResizeReq) GetNewSize() uint64 { + if x != nil { + return x.NewSize + } + return 0 +} + +type NamespaceGetIoStatsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + OBSOLETEUuid *string `protobuf:"bytes,3,opt,name=OBSOLETE_uuid,json=OBSOLETEUuid,proto3,oneof" json:"OBSOLETE_uuid,omitempty"` +} + +func (x *NamespaceGetIoStatsReq) Reset() { + *x = NamespaceGetIoStatsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceGetIoStatsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceGetIoStatsReq) ProtoMessage() {} + +func (x *NamespaceGetIoStatsReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceGetIoStatsReq.ProtoReflect.Descriptor instead. 
+func (*NamespaceGetIoStatsReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{2} +} + +func (x *NamespaceGetIoStatsReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceGetIoStatsReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceGetIoStatsReq) GetOBSOLETEUuid() string { + if x != nil && x.OBSOLETEUuid != nil { + return *x.OBSOLETEUuid + } + return "" +} + +type NamespaceSetQosReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + OBSOLETEUuid *string `protobuf:"bytes,3,opt,name=OBSOLETE_uuid,json=OBSOLETEUuid,proto3,oneof" json:"OBSOLETE_uuid,omitempty"` + RwIosPerSecond *uint64 `protobuf:"varint,4,opt,name=rw_ios_per_second,json=rwIosPerSecond,proto3,oneof" json:"rw_ios_per_second,omitempty"` + RwMbytesPerSecond *uint64 `protobuf:"varint,5,opt,name=rw_mbytes_per_second,json=rwMbytesPerSecond,proto3,oneof" json:"rw_mbytes_per_second,omitempty"` + RMbytesPerSecond *uint64 `protobuf:"varint,6,opt,name=r_mbytes_per_second,json=rMbytesPerSecond,proto3,oneof" json:"r_mbytes_per_second,omitempty"` + WMbytesPerSecond *uint64 `protobuf:"varint,7,opt,name=w_mbytes_per_second,json=wMbytesPerSecond,proto3,oneof" json:"w_mbytes_per_second,omitempty"` + Force *bool `protobuf:"varint,8,opt,name=force,proto3,oneof" json:"force,omitempty"` +} + +func (x *NamespaceSetQosReq) Reset() { + *x = NamespaceSetQosReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceSetQosReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceSetQosReq) ProtoMessage() {} + +func (x *NamespaceSetQosReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceSetQosReq.ProtoReflect.Descriptor instead. 
+func (*NamespaceSetQosReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{3} +} + +func (x *NamespaceSetQosReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceSetQosReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceSetQosReq) GetOBSOLETEUuid() string { + if x != nil && x.OBSOLETEUuid != nil { + return *x.OBSOLETEUuid + } + return "" +} + +func (x *NamespaceSetQosReq) GetRwIosPerSecond() uint64 { + if x != nil && x.RwIosPerSecond != nil { + return *x.RwIosPerSecond + } + return 0 +} + +func (x *NamespaceSetQosReq) GetRwMbytesPerSecond() uint64 { + if x != nil && x.RwMbytesPerSecond != nil { + return *x.RwMbytesPerSecond + } + return 0 +} + +func (x *NamespaceSetQosReq) GetRMbytesPerSecond() uint64 { + if x != nil && x.RMbytesPerSecond != nil { + return *x.RMbytesPerSecond + } + return 0 +} + +func (x *NamespaceSetQosReq) GetWMbytesPerSecond() uint64 { + if x != nil && x.WMbytesPerSecond != nil { + return *x.WMbytesPerSecond + } + return 0 +} + +func (x *NamespaceSetQosReq) GetForce() bool { + if x != nil && x.Force != nil { + return *x.Force + } + return false +} + +type NamespaceChangeLoadBalancingGroupReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + OBSOLETEUuid *string `protobuf:"bytes,3,opt,name=OBSOLETE_uuid,json=OBSOLETEUuid,proto3,oneof" json:"OBSOLETE_uuid,omitempty"` + Anagrpid int32 `protobuf:"varint,4,opt,name=anagrpid,proto3" json:"anagrpid,omitempty"` + AutoLbLogic *bool `protobuf:"varint,5,opt,name=auto_lb_logic,json=autoLbLogic,proto3,oneof" json:"auto_lb_logic,omitempty"` +} + +func (x *NamespaceChangeLoadBalancingGroupReq) Reset() { + *x = NamespaceChangeLoadBalancingGroupReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceChangeLoadBalancingGroupReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceChangeLoadBalancingGroupReq) ProtoMessage() {} + +func (x *NamespaceChangeLoadBalancingGroupReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceChangeLoadBalancingGroupReq.ProtoReflect.Descriptor instead. 
+func (*NamespaceChangeLoadBalancingGroupReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{4} +} + +func (x *NamespaceChangeLoadBalancingGroupReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceChangeLoadBalancingGroupReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceChangeLoadBalancingGroupReq) GetOBSOLETEUuid() string { + if x != nil && x.OBSOLETEUuid != nil { + return *x.OBSOLETEUuid + } + return "" +} + +func (x *NamespaceChangeLoadBalancingGroupReq) GetAnagrpid() int32 { + if x != nil { + return x.Anagrpid + } + return 0 +} + +func (x *NamespaceChangeLoadBalancingGroupReq) GetAutoLbLogic() bool { + if x != nil && x.AutoLbLogic != nil { + return *x.AutoLbLogic + } + return false +} + +type NamespaceChangeVisibilityReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + AutoVisible bool `protobuf:"varint,3,opt,name=auto_visible,json=autoVisible,proto3" json:"auto_visible,omitempty"` + Force *bool `protobuf:"varint,4,opt,name=force,proto3,oneof" json:"force,omitempty"` +} + +func (x *NamespaceChangeVisibilityReq) Reset() { + *x = NamespaceChangeVisibilityReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceChangeVisibilityReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceChangeVisibilityReq) ProtoMessage() {} + +func (x *NamespaceChangeVisibilityReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceChangeVisibilityReq.ProtoReflect.Descriptor instead. 
+func (*NamespaceChangeVisibilityReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{5} +} + +func (x *NamespaceChangeVisibilityReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceChangeVisibilityReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceChangeVisibilityReq) GetAutoVisible() bool { + if x != nil { + return x.AutoVisible + } + return false +} + +func (x *NamespaceChangeVisibilityReq) GetForce() bool { + if x != nil && x.Force != nil { + return *x.Force + } + return false +} + +type NamespaceSetRbdTrashImageReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + TrashImage bool `protobuf:"varint,3,opt,name=trash_image,json=trashImage,proto3" json:"trash_image,omitempty"` +} + +func (x *NamespaceSetRbdTrashImageReq) Reset() { + *x = NamespaceSetRbdTrashImageReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceSetRbdTrashImageReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceSetRbdTrashImageReq) ProtoMessage() {} + +func (x *NamespaceSetRbdTrashImageReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceSetRbdTrashImageReq.ProtoReflect.Descriptor instead. 
+func (*NamespaceSetRbdTrashImageReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{6} +} + +func (x *NamespaceSetRbdTrashImageReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceSetRbdTrashImageReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceSetRbdTrashImageReq) GetTrashImage() bool { + if x != nil { + return x.TrashImage + } + return false +} + +type NamespaceSetAutoResizeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + AutoResize bool `protobuf:"varint,3,opt,name=auto_resize,json=autoResize,proto3" json:"auto_resize,omitempty"` +} + +func (x *NamespaceSetAutoResizeReq) Reset() { + *x = NamespaceSetAutoResizeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceSetAutoResizeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceSetAutoResizeReq) ProtoMessage() {} + +func (x *NamespaceSetAutoResizeReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceSetAutoResizeReq.ProtoReflect.Descriptor instead. +func (*NamespaceSetAutoResizeReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{7} +} + +func (x *NamespaceSetAutoResizeReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceSetAutoResizeReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceSetAutoResizeReq) GetAutoResize() bool { + if x != nil { + return x.AutoResize + } + return false +} + +type NamespaceDeleteReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + OBSOLETEUuid *string `protobuf:"bytes,3,opt,name=OBSOLETE_uuid,json=OBSOLETEUuid,proto3,oneof" json:"OBSOLETE_uuid,omitempty"` + IAmSure *bool `protobuf:"varint,4,opt,name=i_am_sure,json=iAmSure,proto3,oneof" json:"i_am_sure,omitempty"` +} + +func (x *NamespaceDeleteReq) Reset() { + *x = NamespaceDeleteReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceDeleteReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceDeleteReq) ProtoMessage() {} + +func (x *NamespaceDeleteReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
NamespaceDeleteReq.ProtoReflect.Descriptor instead. +func (*NamespaceDeleteReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{8} +} + +func (x *NamespaceDeleteReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceDeleteReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceDeleteReq) GetOBSOLETEUuid() string { + if x != nil && x.OBSOLETEUuid != nil { + return *x.OBSOLETEUuid + } + return "" +} + +func (x *NamespaceDeleteReq) GetIAmSure() bool { + if x != nil && x.IAmSure != nil { + return *x.IAmSure + } + return false +} + +type NamespaceAddHostReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + HostNqn string `protobuf:"bytes,3,opt,name=host_nqn,json=hostNqn,proto3" json:"host_nqn,omitempty"` + Force *bool `protobuf:"varint,4,opt,name=force,proto3,oneof" json:"force,omitempty"` +} + +func (x *NamespaceAddHostReq) Reset() { + *x = NamespaceAddHostReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceAddHostReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceAddHostReq) ProtoMessage() {} + +func (x *NamespaceAddHostReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceAddHostReq.ProtoReflect.Descriptor instead. 
+func (*NamespaceAddHostReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{9} +} + +func (x *NamespaceAddHostReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceAddHostReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceAddHostReq) GetHostNqn() string { + if x != nil { + return x.HostNqn + } + return "" +} + +func (x *NamespaceAddHostReq) GetForce() bool { + if x != nil && x.Force != nil { + return *x.Force + } + return false +} + +type NamespaceDeleteHostReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,2,opt,name=nsid,proto3" json:"nsid,omitempty"` + HostNqn string `protobuf:"bytes,3,opt,name=host_nqn,json=hostNqn,proto3" json:"host_nqn,omitempty"` +} + +func (x *NamespaceDeleteHostReq) Reset() { + *x = NamespaceDeleteHostReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceDeleteHostReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceDeleteHostReq) ProtoMessage() {} + +func (x *NamespaceDeleteHostReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceDeleteHostReq.ProtoReflect.Descriptor instead. 
+func (*NamespaceDeleteHostReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{10} +} + +func (x *NamespaceDeleteHostReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceDeleteHostReq) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceDeleteHostReq) GetHostNqn() string { + if x != nil { + return x.HostNqn + } + return "" +} + +type CreateSubsystemReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + SerialNumber string `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + MaxNamespaces *uint32 `protobuf:"varint,3,opt,name=max_namespaces,json=maxNamespaces,proto3,oneof" json:"max_namespaces,omitempty"` + EnableHa bool `protobuf:"varint,4,opt,name=enable_ha,json=enableHa,proto3" json:"enable_ha,omitempty"` + NoGroupAppend *bool `protobuf:"varint,5,opt,name=no_group_append,json=noGroupAppend,proto3,oneof" json:"no_group_append,omitempty"` + DhchapKey *string `protobuf:"bytes,6,opt,name=dhchap_key,json=dhchapKey,proto3,oneof" json:"dhchap_key,omitempty"` + KeyEncrypted *bool `protobuf:"varint,7,opt,name=key_encrypted,json=keyEncrypted,proto3,oneof" json:"key_encrypted,omitempty"` +} + +func (x *CreateSubsystemReq) Reset() { + *x = CreateSubsystemReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSubsystemReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSubsystemReq) ProtoMessage() {} + +func (x *CreateSubsystemReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSubsystemReq.ProtoReflect.Descriptor instead. 
+func (*CreateSubsystemReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{11} +} + +func (x *CreateSubsystemReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *CreateSubsystemReq) GetSerialNumber() string { + if x != nil { + return x.SerialNumber + } + return "" +} + +func (x *CreateSubsystemReq) GetMaxNamespaces() uint32 { + if x != nil && x.MaxNamespaces != nil { + return *x.MaxNamespaces + } + return 0 +} + +func (x *CreateSubsystemReq) GetEnableHa() bool { + if x != nil { + return x.EnableHa + } + return false +} + +func (x *CreateSubsystemReq) GetNoGroupAppend() bool { + if x != nil && x.NoGroupAppend != nil { + return *x.NoGroupAppend + } + return false +} + +func (x *CreateSubsystemReq) GetDhchapKey() string { + if x != nil && x.DhchapKey != nil { + return *x.DhchapKey + } + return "" +} + +func (x *CreateSubsystemReq) GetKeyEncrypted() bool { + if x != nil && x.KeyEncrypted != nil { + return *x.KeyEncrypted + } + return false +} + +type DeleteSubsystemReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Force *bool `protobuf:"varint,2,opt,name=force,proto3,oneof" json:"force,omitempty"` +} + +func (x *DeleteSubsystemReq) Reset() { + *x = DeleteSubsystemReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteSubsystemReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSubsystemReq) ProtoMessage() {} + +func (x *DeleteSubsystemReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSubsystemReq.ProtoReflect.Descriptor instead. 
+func (*DeleteSubsystemReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{12} +} + +func (x *DeleteSubsystemReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *DeleteSubsystemReq) GetForce() bool { + if x != nil && x.Force != nil { + return *x.Force + } + return false +} + +type ChangeSubsystemKeyReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + DhchapKey *string `protobuf:"bytes,2,opt,name=dhchap_key,json=dhchapKey,proto3,oneof" json:"dhchap_key,omitempty"` +} + +func (x *ChangeSubsystemKeyReq) Reset() { + *x = ChangeSubsystemKeyReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChangeSubsystemKeyReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChangeSubsystemKeyReq) ProtoMessage() {} + +func (x *ChangeSubsystemKeyReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChangeSubsystemKeyReq.ProtoReflect.Descriptor instead. +func (*ChangeSubsystemKeyReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{13} +} + +func (x *ChangeSubsystemKeyReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *ChangeSubsystemKeyReq) GetDhchapKey() string { + if x != nil && x.DhchapKey != nil { + return *x.DhchapKey + } + return "" +} + +type ListNamespacesReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"` + Nsid *uint32 `protobuf:"varint,2,opt,name=nsid,proto3,oneof" json:"nsid,omitempty"` + Uuid *string `protobuf:"bytes,3,opt,name=uuid,proto3,oneof" json:"uuid,omitempty"` +} + +func (x *ListNamespacesReq) Reset() { + *x = ListNamespacesReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNamespacesReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNamespacesReq) ProtoMessage() {} + +func (x *ListNamespacesReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNamespacesReq.ProtoReflect.Descriptor instead. 
+func (*ListNamespacesReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{14} +} + +func (x *ListNamespacesReq) GetSubsystem() string { + if x != nil { + return x.Subsystem + } + return "" +} + +func (x *ListNamespacesReq) GetNsid() uint32 { + if x != nil && x.Nsid != nil { + return *x.Nsid + } + return 0 +} + +func (x *ListNamespacesReq) GetUuid() string { + if x != nil && x.Uuid != nil { + return *x.Uuid + } + return "" +} + +type AddHostReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + HostNqn string `protobuf:"bytes,2,opt,name=host_nqn,json=hostNqn,proto3" json:"host_nqn,omitempty"` + Psk *string `protobuf:"bytes,3,opt,name=psk,proto3,oneof" json:"psk,omitempty"` + DhchapKey *string `protobuf:"bytes,4,opt,name=dhchap_key,json=dhchapKey,proto3,oneof" json:"dhchap_key,omitempty"` + PskEncrypted *bool `protobuf:"varint,5,opt,name=psk_encrypted,json=pskEncrypted,proto3,oneof" json:"psk_encrypted,omitempty"` + KeyEncrypted *bool `protobuf:"varint,6,opt,name=key_encrypted,json=keyEncrypted,proto3,oneof" json:"key_encrypted,omitempty"` +} + +func (x *AddHostReq) Reset() { + *x = AddHostReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddHostReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddHostReq) ProtoMessage() {} + +func (x *AddHostReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddHostReq.ProtoReflect.Descriptor instead. 
+func (*AddHostReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{15} +} + +func (x *AddHostReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *AddHostReq) GetHostNqn() string { + if x != nil { + return x.HostNqn + } + return "" +} + +func (x *AddHostReq) GetPsk() string { + if x != nil && x.Psk != nil { + return *x.Psk + } + return "" +} + +func (x *AddHostReq) GetDhchapKey() string { + if x != nil && x.DhchapKey != nil { + return *x.DhchapKey + } + return "" +} + +func (x *AddHostReq) GetPskEncrypted() bool { + if x != nil && x.PskEncrypted != nil { + return *x.PskEncrypted + } + return false +} + +func (x *AddHostReq) GetKeyEncrypted() bool { + if x != nil && x.KeyEncrypted != nil { + return *x.KeyEncrypted + } + return false +} + +type ChangeHostKeyReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + HostNqn string `protobuf:"bytes,2,opt,name=host_nqn,json=hostNqn,proto3" json:"host_nqn,omitempty"` + DhchapKey *string `protobuf:"bytes,3,opt,name=dhchap_key,json=dhchapKey,proto3,oneof" json:"dhchap_key,omitempty"` +} + +func (x *ChangeHostKeyReq) Reset() { + *x = ChangeHostKeyReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChangeHostKeyReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChangeHostKeyReq) ProtoMessage() {} + +func (x *ChangeHostKeyReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChangeHostKeyReq.ProtoReflect.Descriptor instead. 
+func (*ChangeHostKeyReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{16} +} + +func (x *ChangeHostKeyReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *ChangeHostKeyReq) GetHostNqn() string { + if x != nil { + return x.HostNqn + } + return "" +} + +func (x *ChangeHostKeyReq) GetDhchapKey() string { + if x != nil && x.DhchapKey != nil { + return *x.DhchapKey + } + return "" +} + +type RemoveHostReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + HostNqn string `protobuf:"bytes,2,opt,name=host_nqn,json=hostNqn,proto3" json:"host_nqn,omitempty"` +} + +func (x *RemoveHostReq) Reset() { + *x = RemoveHostReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoveHostReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveHostReq) ProtoMessage() {} + +func (x *RemoveHostReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveHostReq.ProtoReflect.Descriptor instead. +func (*RemoveHostReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{17} +} + +func (x *RemoveHostReq) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *RemoveHostReq) GetHostNqn() string { + if x != nil { + return x.HostNqn + } + return "" +} + +type ListHostsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"` + ClearAlerts *bool `protobuf:"varint,2,opt,name=clear_alerts,json=clearAlerts,proto3,oneof" json:"clear_alerts,omitempty"` +} + +func (x *ListHostsReq) Reset() { + *x = ListHostsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHostsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHostsReq) ProtoMessage() {} + +func (x *ListHostsReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHostsReq.ProtoReflect.Descriptor instead. 
+func (*ListHostsReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{18} +} + +func (x *ListHostsReq) GetSubsystem() string { + if x != nil { + return x.Subsystem + } + return "" +} + +func (x *ListHostsReq) GetClearAlerts() bool { + if x != nil && x.ClearAlerts != nil { + return *x.ClearAlerts + } + return false +} + +type ListConnectionsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"` +} + +func (x *ListConnectionsReq) Reset() { + *x = ListConnectionsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListConnectionsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListConnectionsReq) ProtoMessage() {} + +func (x *ListConnectionsReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListConnectionsReq.ProtoReflect.Descriptor instead. +func (*ListConnectionsReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{19} +} + +func (x *ListConnectionsReq) GetSubsystem() string { + if x != nil { + return x.Subsystem + } + return "" +} + +type CreateListenerReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nqn string `protobuf:"bytes,1,opt,name=nqn,proto3" json:"nqn,omitempty"` + HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + Traddr string `protobuf:"bytes,3,opt,name=traddr,proto3" json:"traddr,omitempty"` + Adrfam *AddressFamily `protobuf:"varint,5,opt,name=adrfam,proto3,enum=AddressFamily,oneof" json:"adrfam,omitempty"` + Trsvcid *uint32 `protobuf:"varint,6,opt,name=trsvcid,proto3,oneof" json:"trsvcid,omitempty"` + Secure *bool `protobuf:"varint,7,opt,name=secure,proto3,oneof" json:"secure,omitempty"` + VerifyHostName *bool `protobuf:"varint,8,opt,name=verify_host_name,json=verifyHostName,proto3,oneof" json:"verify_host_name,omitempty"` +} + +func (x *CreateListenerReq) Reset() { + *x = CreateListenerReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateListenerReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateListenerReq) ProtoMessage() {} + +func (x *CreateListenerReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateListenerReq.ProtoReflect.Descriptor instead. 
+func (*CreateListenerReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{20} +} + +func (x *CreateListenerReq) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +func (x *CreateListenerReq) GetHostName() string { + if x != nil { + return x.HostName + } + return "" +} + +func (x *CreateListenerReq) GetTraddr() string { + if x != nil { + return x.Traddr + } + return "" +} + +func (x *CreateListenerReq) GetAdrfam() AddressFamily { + if x != nil && x.Adrfam != nil { + return *x.Adrfam + } + return AddressFamily_ipv4 +} + +func (x *CreateListenerReq) GetTrsvcid() uint32 { + if x != nil && x.Trsvcid != nil { + return *x.Trsvcid + } + return 0 +} + +func (x *CreateListenerReq) GetSecure() bool { + if x != nil && x.Secure != nil { + return *x.Secure + } + return false +} + +func (x *CreateListenerReq) GetVerifyHostName() bool { + if x != nil && x.VerifyHostName != nil { + return *x.VerifyHostName + } + return false +} + +type DeleteListenerReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nqn string `protobuf:"bytes,1,opt,name=nqn,proto3" json:"nqn,omitempty"` + HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + Traddr string `protobuf:"bytes,3,opt,name=traddr,proto3" json:"traddr,omitempty"` + Adrfam *AddressFamily `protobuf:"varint,5,opt,name=adrfam,proto3,enum=AddressFamily,oneof" json:"adrfam,omitempty"` + Trsvcid *uint32 `protobuf:"varint,6,opt,name=trsvcid,proto3,oneof" json:"trsvcid,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,proto3,oneof" json:"force,omitempty"` +} + +func (x *DeleteListenerReq) Reset() { + *x = DeleteListenerReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteListenerReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteListenerReq) ProtoMessage() {} + +func (x *DeleteListenerReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteListenerReq.ProtoReflect.Descriptor instead. 
+func (*DeleteListenerReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{21} +} + +func (x *DeleteListenerReq) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +func (x *DeleteListenerReq) GetHostName() string { + if x != nil { + return x.HostName + } + return "" +} + +func (x *DeleteListenerReq) GetTraddr() string { + if x != nil { + return x.Traddr + } + return "" +} + +func (x *DeleteListenerReq) GetAdrfam() AddressFamily { + if x != nil && x.Adrfam != nil { + return *x.Adrfam + } + return AddressFamily_ipv4 +} + +func (x *DeleteListenerReq) GetTrsvcid() uint32 { + if x != nil && x.Trsvcid != nil { + return *x.Trsvcid + } + return 0 +} + +func (x *DeleteListenerReq) GetForce() bool { + if x != nil && x.Force != nil { + return *x.Force + } + return false +} + +type ListListenersReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"` +} + +func (x *ListListenersReq) Reset() { + *x = ListListenersReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListListenersReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListListenersReq) ProtoMessage() {} + +func (x *ListListenersReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListListenersReq.ProtoReflect.Descriptor instead. +func (*ListListenersReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{22} +} + +func (x *ListListenersReq) GetSubsystem() string { + if x != nil { + return x.Subsystem + } + return "" +} + +type ListSubsystemsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn *string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3,oneof" json:"subsystem_nqn,omitempty"` + SerialNumber *string `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3,oneof" json:"serial_number,omitempty"` +} + +func (x *ListSubsystemsReq) Reset() { + *x = ListSubsystemsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSubsystemsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSubsystemsReq) ProtoMessage() {} + +func (x *ListSubsystemsReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSubsystemsReq.ProtoReflect.Descriptor instead. 
+func (*ListSubsystemsReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{23} +} + +func (x *ListSubsystemsReq) GetSubsystemNqn() string { + if x != nil && x.SubsystemNqn != nil { + return *x.SubsystemNqn + } + return "" +} + +func (x *ListSubsystemsReq) GetSerialNumber() string { + if x != nil && x.SerialNumber != nil { + return *x.SerialNumber + } + return "" +} + +type GetSubsystemsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetSubsystemsReq) Reset() { + *x = GetSubsystemsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSubsystemsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubsystemsReq) ProtoMessage() {} + +func (x *GetSubsystemsReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubsystemsReq.ProtoReflect.Descriptor instead. +func (*GetSubsystemsReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{24} +} + +type GetSpdkNvmfLogFlagsAndLevelReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AllLogFlags *bool `protobuf:"varint,1,opt,name=all_log_flags,json=allLogFlags,proto3,oneof" json:"all_log_flags,omitempty"` +} + +func (x *GetSpdkNvmfLogFlagsAndLevelReq) Reset() { + *x = GetSpdkNvmfLogFlagsAndLevelReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSpdkNvmfLogFlagsAndLevelReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSpdkNvmfLogFlagsAndLevelReq) ProtoMessage() {} + +func (x *GetSpdkNvmfLogFlagsAndLevelReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSpdkNvmfLogFlagsAndLevelReq.ProtoReflect.Descriptor instead. 
+func (*GetSpdkNvmfLogFlagsAndLevelReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{25} +} + +func (x *GetSpdkNvmfLogFlagsAndLevelReq) GetAllLogFlags() bool { + if x != nil && x.AllLogFlags != nil { + return *x.AllLogFlags + } + return false +} + +type DisableSpdkNvmfLogsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExtraLogFlags []string `protobuf:"bytes,1,rep,name=extra_log_flags,json=extraLogFlags,proto3" json:"extra_log_flags,omitempty"` +} + +func (x *DisableSpdkNvmfLogsReq) Reset() { + *x = DisableSpdkNvmfLogsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisableSpdkNvmfLogsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisableSpdkNvmfLogsReq) ProtoMessage() {} + +func (x *DisableSpdkNvmfLogsReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisableSpdkNvmfLogsReq.ProtoReflect.Descriptor instead. +func (*DisableSpdkNvmfLogsReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{26} +} + +func (x *DisableSpdkNvmfLogsReq) GetExtraLogFlags() []string { + if x != nil { + return x.ExtraLogFlags + } + return nil +} + +type SetSpdkNvmfLogsReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LogLevel *LogLevel `protobuf:"varint,1,opt,name=log_level,json=logLevel,proto3,enum=LogLevel,oneof" json:"log_level,omitempty"` + PrintLevel *LogLevel `protobuf:"varint,2,opt,name=print_level,json=printLevel,proto3,enum=LogLevel,oneof" json:"print_level,omitempty"` + ExtraLogFlags []string `protobuf:"bytes,3,rep,name=extra_log_flags,json=extraLogFlags,proto3" json:"extra_log_flags,omitempty"` +} + +func (x *SetSpdkNvmfLogsReq) Reset() { + *x = SetSpdkNvmfLogsReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetSpdkNvmfLogsReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetSpdkNvmfLogsReq) ProtoMessage() {} + +func (x *SetSpdkNvmfLogsReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetSpdkNvmfLogsReq.ProtoReflect.Descriptor instead. 
+func (*SetSpdkNvmfLogsReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{27} +} + +func (x *SetSpdkNvmfLogsReq) GetLogLevel() LogLevel { + if x != nil && x.LogLevel != nil { + return *x.LogLevel + } + return LogLevel_ERROR +} + +func (x *SetSpdkNvmfLogsReq) GetPrintLevel() LogLevel { + if x != nil && x.PrintLevel != nil { + return *x.PrintLevel + } + return LogLevel_ERROR +} + +func (x *SetSpdkNvmfLogsReq) GetExtraLogFlags() []string { + if x != nil { + return x.ExtraLogFlags + } + return nil +} + +type GetGatewayInfoReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CliVersion *string `protobuf:"bytes,1,opt,name=cli_version,json=cliVersion,proto3,oneof" json:"cli_version,omitempty"` +} + +func (x *GetGatewayInfoReq) Reset() { + *x = GetGatewayInfoReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetGatewayInfoReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGatewayInfoReq) ProtoMessage() {} + +func (x *GetGatewayInfoReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGatewayInfoReq.ProtoReflect.Descriptor instead. +func (*GetGatewayInfoReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{28} +} + +func (x *GetGatewayInfoReq) GetCliVersion() string { + if x != nil && x.CliVersion != nil { + return *x.CliVersion + } + return "" +} + +type GetGatewayLogLevelReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetGatewayLogLevelReq) Reset() { + *x = GetGatewayLogLevelReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetGatewayLogLevelReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGatewayLogLevelReq) ProtoMessage() {} + +func (x *GetGatewayLogLevelReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGatewayLogLevelReq.ProtoReflect.Descriptor instead. 
+func (*GetGatewayLogLevelReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{29} +} + +type SetGatewayLogLevelReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LogLevel GwLogLevel `protobuf:"varint,1,opt,name=log_level,json=logLevel,proto3,enum=GwLogLevel" json:"log_level,omitempty"` +} + +func (x *SetGatewayLogLevelReq) Reset() { + *x = SetGatewayLogLevelReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetGatewayLogLevelReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetGatewayLogLevelReq) ProtoMessage() {} + +func (x *SetGatewayLogLevelReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetGatewayLogLevelReq.ProtoReflect.Descriptor instead. +func (*SetGatewayLogLevelReq) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{30} +} + +func (x *SetGatewayLogLevelReq) GetLogLevel() GwLogLevel { + if x != nil { + return x.LogLevel + } + return GwLogLevel_notset +} + +type ShowGatewayListenersInfoReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubsystemNqn string `protobuf:"bytes,1,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` +} + +func (x *ShowGatewayListenersInfoReq) Reset() { + *x = ShowGatewayListenersInfoReq{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShowGatewayListenersInfoReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShowGatewayListenersInfoReq) ProtoMessage() {} + +func (x *ShowGatewayListenersInfoReq) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShowGatewayListenersInfoReq.ProtoReflect.Descriptor instead. 
+func (*ShowGatewayListenersInfoReq) Descriptor() ([]byte, []int) {
+	return file_gateway_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *ShowGatewayListenersInfoReq) GetSubsystemNqn() string {
+	if x != nil {
+		return x.SubsystemNqn
+	}
+	return ""
+}
+
+type AnaGroupState struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	GrpId uint32   `protobuf:"varint,1,opt,name=grp_id,json=grpId,proto3" json:"grp_id,omitempty"`  // group id
+	State AnaState `protobuf:"varint,2,opt,name=state,proto3,enum=AnaState" json:"state,omitempty"` // ANA state
+}
+
+func (x *AnaGroupState) Reset() {
+	*x = AnaGroupState{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_gateway_proto_msgTypes[32]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AnaGroupState) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AnaGroupState) ProtoMessage() {}
+
+func (x *AnaGroupState) ProtoReflect() protoreflect.Message {
+	mi := &file_gateway_proto_msgTypes[32]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use AnaGroupState.ProtoReflect.Descriptor instead.
+func (*AnaGroupState) Descriptor() ([]byte, []int) {
+	return file_gateway_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *AnaGroupState) GetGrpId() uint32 {
+	if x != nil {
+		return x.GrpId
+	}
+	return 0
+}
+
+func (x *AnaGroupState) GetState() AnaState {
+	if x != nil {
+		return x.State
+	}
+	return AnaState_UNSET
+}
+
+type NqnAnaStates struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Nqn    string           `protobuf:"bytes,1,opt,name=nqn,proto3" json:"nqn,omitempty"`       // subsystem nqn
+	States []*AnaGroupState `protobuf:"bytes,2,rep,name=states,proto3" json:"states,omitempty"` // list of group states
+}
+
+func (x *NqnAnaStates) Reset() {
+	*x = NqnAnaStates{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_gateway_proto_msgTypes[33]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NqnAnaStates) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NqnAnaStates) ProtoMessage() {}
+
+func (x *NqnAnaStates) ProtoReflect() protoreflect.Message {
+	mi := &file_gateway_proto_msgTypes[33]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NqnAnaStates.ProtoReflect.Descriptor instead.
+func (*NqnAnaStates) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{33} +} + +func (x *NqnAnaStates) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +func (x *NqnAnaStates) GetStates() []*AnaGroupState { + if x != nil { + return x.States + } + return nil +} + +type AnaInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + States []*NqnAnaStates `protobuf:"bytes,1,rep,name=states,proto3" json:"states,omitempty"` // list of nqn states +} + +func (x *AnaInfo) Reset() { + *x = AnaInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnaInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnaInfo) ProtoMessage() {} + +func (x *AnaInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnaInfo.ProtoReflect.Descriptor instead. +func (*AnaInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{34} +} + +func (x *AnaInfo) GetStates() []*NqnAnaStates { + if x != nil { + return x.States + } + return nil +} + +type ReqStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ReqStatus) Reset() { + *x = ReqStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReqStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReqStatus) ProtoMessage() {} + +func (x *ReqStatus) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReqStatus.ProtoReflect.Descriptor instead. 
+func (*ReqStatus) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{35} +} + +func (x *ReqStatus) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *ReqStatus) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +type SubsysStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Nqn string `protobuf:"bytes,3,opt,name=nqn,proto3" json:"nqn,omitempty"` +} + +func (x *SubsysStatus) Reset() { + *x = SubsysStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubsysStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubsysStatus) ProtoMessage() {} + +func (x *SubsysStatus) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubsysStatus.ProtoReflect.Descriptor instead. +func (*SubsysStatus) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{36} +} + +func (x *SubsysStatus) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *SubsysStatus) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *SubsysStatus) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +type NsidStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Nsid uint32 `protobuf:"varint,3,opt,name=nsid,proto3" json:"nsid,omitempty"` +} + +func (x *NsidStatus) Reset() { + *x = NsidStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NsidStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NsidStatus) ProtoMessage() {} + +func (x *NsidStatus) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NsidStatus.ProtoReflect.Descriptor instead. 
+func (*NsidStatus) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{37} +} + +func (x *NsidStatus) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *NsidStatus) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *NsidStatus) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +type SubsystemsInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subsystems []*Subsystem `protobuf:"bytes,1,rep,name=subsystems,proto3" json:"subsystems,omitempty"` +} + +func (x *SubsystemsInfo) Reset() { + *x = SubsystemsInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubsystemsInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubsystemsInfo) ProtoMessage() {} + +func (x *SubsystemsInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubsystemsInfo.ProtoReflect.Descriptor instead. +func (*SubsystemsInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{38} +} + +func (x *SubsystemsInfo) GetSubsystems() []*Subsystem { + if x != nil { + return x.Subsystems + } + return nil +} + +type Subsystem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nqn string `protobuf:"bytes,1,opt,name=nqn,proto3" json:"nqn,omitempty"` + Subtype string `protobuf:"bytes,2,opt,name=subtype,proto3" json:"subtype,omitempty"` + ListenAddresses []*ListenAddress `protobuf:"bytes,3,rep,name=listen_addresses,json=listenAddresses,proto3" json:"listen_addresses,omitempty"` + Hosts []*Host `protobuf:"bytes,4,rep,name=hosts,proto3" json:"hosts,omitempty"` + AllowAnyHost bool `protobuf:"varint,5,opt,name=allow_any_host,json=allowAnyHost,proto3" json:"allow_any_host,omitempty"` + SerialNumber *string `protobuf:"bytes,6,opt,name=serial_number,json=serialNumber,proto3,oneof" json:"serial_number,omitempty"` + ModelNumber *string `protobuf:"bytes,7,opt,name=model_number,json=modelNumber,proto3,oneof" json:"model_number,omitempty"` + MaxNamespaces *uint32 `protobuf:"varint,8,opt,name=max_namespaces,json=maxNamespaces,proto3,oneof" json:"max_namespaces,omitempty"` + MinCntlid *uint32 `protobuf:"varint,9,opt,name=min_cntlid,json=minCntlid,proto3,oneof" json:"min_cntlid,omitempty"` + MaxCntlid *uint32 `protobuf:"varint,10,opt,name=max_cntlid,json=maxCntlid,proto3,oneof" json:"max_cntlid,omitempty"` + Namespaces []*Namespace `protobuf:"bytes,11,rep,name=namespaces,proto3" json:"namespaces,omitempty"` + HasDhchapKey *bool `protobuf:"varint,12,opt,name=has_dhchap_key,json=hasDhchapKey,proto3,oneof" json:"has_dhchap_key,omitempty"` +} + +func (x *Subsystem) Reset() { + *x = Subsystem{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Subsystem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Subsystem) ProtoMessage() {} + +func (x *Subsystem) ProtoReflect() protoreflect.Message { + mi := 
&file_gateway_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Subsystem.ProtoReflect.Descriptor instead. +func (*Subsystem) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{39} +} + +func (x *Subsystem) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +func (x *Subsystem) GetSubtype() string { + if x != nil { + return x.Subtype + } + return "" +} + +func (x *Subsystem) GetListenAddresses() []*ListenAddress { + if x != nil { + return x.ListenAddresses + } + return nil +} + +func (x *Subsystem) GetHosts() []*Host { + if x != nil { + return x.Hosts + } + return nil +} + +func (x *Subsystem) GetAllowAnyHost() bool { + if x != nil { + return x.AllowAnyHost + } + return false +} + +func (x *Subsystem) GetSerialNumber() string { + if x != nil && x.SerialNumber != nil { + return *x.SerialNumber + } + return "" +} + +func (x *Subsystem) GetModelNumber() string { + if x != nil && x.ModelNumber != nil { + return *x.ModelNumber + } + return "" +} + +func (x *Subsystem) GetMaxNamespaces() uint32 { + if x != nil && x.MaxNamespaces != nil { + return *x.MaxNamespaces + } + return 0 +} + +func (x *Subsystem) GetMinCntlid() uint32 { + if x != nil && x.MinCntlid != nil { + return *x.MinCntlid + } + return 0 +} + +func (x *Subsystem) GetMaxCntlid() uint32 { + if x != nil && x.MaxCntlid != nil { + return *x.MaxCntlid + } + return 0 +} + +func (x *Subsystem) GetNamespaces() []*Namespace { + if x != nil { + return x.Namespaces + } + return nil +} + +func (x *Subsystem) GetHasDhchapKey() bool { + if x != nil && x.HasDhchapKey != nil { + return *x.HasDhchapKey + } + return false +} + +type ListenAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Trtype string `protobuf:"bytes,1,opt,name=trtype,proto3" json:"trtype,omitempty"` + Adrfam string `protobuf:"bytes,2,opt,name=adrfam,proto3" json:"adrfam,omitempty"` + Traddr string `protobuf:"bytes,3,opt,name=traddr,proto3" json:"traddr,omitempty"` + Trsvcid string `protobuf:"bytes,4,opt,name=trsvcid,proto3" json:"trsvcid,omitempty"` + Transport *string `protobuf:"bytes,5,opt,name=transport,proto3,oneof" json:"transport,omitempty"` + Secure *bool `protobuf:"varint,6,opt,name=secure,proto3,oneof" json:"secure,omitempty"` +} + +func (x *ListenAddress) Reset() { + *x = ListenAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenAddress) ProtoMessage() {} + +func (x *ListenAddress) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenAddress.ProtoReflect.Descriptor instead. 
+func (*ListenAddress) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{40} +} + +func (x *ListenAddress) GetTrtype() string { + if x != nil { + return x.Trtype + } + return "" +} + +func (x *ListenAddress) GetAdrfam() string { + if x != nil { + return x.Adrfam + } + return "" +} + +func (x *ListenAddress) GetTraddr() string { + if x != nil { + return x.Traddr + } + return "" +} + +func (x *ListenAddress) GetTrsvcid() string { + if x != nil { + return x.Trsvcid + } + return "" +} + +func (x *ListenAddress) GetTransport() string { + if x != nil && x.Transport != nil { + return *x.Transport + } + return "" +} + +func (x *ListenAddress) GetSecure() bool { + if x != nil && x.Secure != nil { + return *x.Secure + } + return false +} + +type Namespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nsid uint32 `protobuf:"varint,1,opt,name=nsid,proto3" json:"nsid,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + BdevName *string `protobuf:"bytes,3,opt,name=bdev_name,json=bdevName,proto3,oneof" json:"bdev_name,omitempty"` + Nguid *string `protobuf:"bytes,4,opt,name=nguid,proto3,oneof" json:"nguid,omitempty"` + Uuid *string `protobuf:"bytes,5,opt,name=uuid,proto3,oneof" json:"uuid,omitempty"` + Anagrpid *uint32 `protobuf:"varint,6,opt,name=anagrpid,proto3,oneof" json:"anagrpid,omitempty"` + Nonce *string `protobuf:"bytes,7,opt,name=nonce,proto3,oneof" json:"nonce,omitempty"` + AutoVisible *bool `protobuf:"varint,8,opt,name=auto_visible,json=autoVisible,proto3,oneof" json:"auto_visible,omitempty"` + Hosts []string `protobuf:"bytes,9,rep,name=hosts,proto3" json:"hosts,omitempty"` +} + +func (x *Namespace) Reset() { + *x = Namespace{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Namespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Namespace) ProtoMessage() {} + +func (x *Namespace) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Namespace.ProtoReflect.Descriptor instead. 
+func (*Namespace) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{41} +} + +func (x *Namespace) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *Namespace) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Namespace) GetBdevName() string { + if x != nil && x.BdevName != nil { + return *x.BdevName + } + return "" +} + +func (x *Namespace) GetNguid() string { + if x != nil && x.Nguid != nil { + return *x.Nguid + } + return "" +} + +func (x *Namespace) GetUuid() string { + if x != nil && x.Uuid != nil { + return *x.Uuid + } + return "" +} + +func (x *Namespace) GetAnagrpid() uint32 { + if x != nil && x.Anagrpid != nil { + return *x.Anagrpid + } + return 0 +} + +func (x *Namespace) GetNonce() string { + if x != nil && x.Nonce != nil { + return *x.Nonce + } + return "" +} + +func (x *Namespace) GetAutoVisible() bool { + if x != nil && x.AutoVisible != nil { + return *x.AutoVisible + } + return false +} + +func (x *Namespace) GetHosts() []string { + if x != nil { + return x.Hosts + } + return nil +} + +type SubsystemsInfoCli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Subsystems []*SubsystemCli `protobuf:"bytes,3,rep,name=subsystems,proto3" json:"subsystems,omitempty"` +} + +func (x *SubsystemsInfoCli) Reset() { + *x = SubsystemsInfoCli{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubsystemsInfoCli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubsystemsInfoCli) ProtoMessage() {} + +func (x *SubsystemsInfoCli) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubsystemsInfoCli.ProtoReflect.Descriptor instead. 
+func (*SubsystemsInfoCli) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{42} +} + +func (x *SubsystemsInfoCli) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *SubsystemsInfoCli) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *SubsystemsInfoCli) GetSubsystems() []*SubsystemCli { + if x != nil { + return x.Subsystems + } + return nil +} + +type SubsystemCli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nqn string `protobuf:"bytes,1,opt,name=nqn,proto3" json:"nqn,omitempty"` + EnableHa bool `protobuf:"varint,2,opt,name=enable_ha,json=enableHa,proto3" json:"enable_ha,omitempty"` + SerialNumber string `protobuf:"bytes,3,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + ModelNumber string `protobuf:"bytes,4,opt,name=model_number,json=modelNumber,proto3" json:"model_number,omitempty"` + MinCntlid uint32 `protobuf:"varint,5,opt,name=min_cntlid,json=minCntlid,proto3" json:"min_cntlid,omitempty"` + MaxCntlid uint32 `protobuf:"varint,6,opt,name=max_cntlid,json=maxCntlid,proto3" json:"max_cntlid,omitempty"` + NamespaceCount uint32 `protobuf:"varint,7,opt,name=namespace_count,json=namespaceCount,proto3" json:"namespace_count,omitempty"` + Subtype string `protobuf:"bytes,8,opt,name=subtype,proto3" json:"subtype,omitempty"` + MaxNamespaces uint32 `protobuf:"varint,9,opt,name=max_namespaces,json=maxNamespaces,proto3" json:"max_namespaces,omitempty"` + HasDhchapKey *bool `protobuf:"varint,10,opt,name=has_dhchap_key,json=hasDhchapKey,proto3,oneof" json:"has_dhchap_key,omitempty"` + AllowAnyHost *bool `protobuf:"varint,11,opt,name=allow_any_host,json=allowAnyHost,proto3,oneof" json:"allow_any_host,omitempty"` + CreatedWithoutKey *bool `protobuf:"varint,12,opt,name=created_without_key,json=createdWithoutKey,proto3,oneof" json:"created_without_key,omitempty"` +} + +func (x *SubsystemCli) Reset() { + *x = SubsystemCli{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubsystemCli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubsystemCli) ProtoMessage() {} + +func (x *SubsystemCli) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubsystemCli.ProtoReflect.Descriptor instead. 
+func (*SubsystemCli) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{43} +} + +func (x *SubsystemCli) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +func (x *SubsystemCli) GetEnableHa() bool { + if x != nil { + return x.EnableHa + } + return false +} + +func (x *SubsystemCli) GetSerialNumber() string { + if x != nil { + return x.SerialNumber + } + return "" +} + +func (x *SubsystemCli) GetModelNumber() string { + if x != nil { + return x.ModelNumber + } + return "" +} + +func (x *SubsystemCli) GetMinCntlid() uint32 { + if x != nil { + return x.MinCntlid + } + return 0 +} + +func (x *SubsystemCli) GetMaxCntlid() uint32 { + if x != nil { + return x.MaxCntlid + } + return 0 +} + +func (x *SubsystemCli) GetNamespaceCount() uint32 { + if x != nil { + return x.NamespaceCount + } + return 0 +} + +func (x *SubsystemCli) GetSubtype() string { + if x != nil { + return x.Subtype + } + return "" +} + +func (x *SubsystemCli) GetMaxNamespaces() uint32 { + if x != nil { + return x.MaxNamespaces + } + return 0 +} + +func (x *SubsystemCli) GetHasDhchapKey() bool { + if x != nil && x.HasDhchapKey != nil { + return *x.HasDhchapKey + } + return false +} + +func (x *SubsystemCli) GetAllowAnyHost() bool { + if x != nil && x.AllowAnyHost != nil { + return *x.AllowAnyHost + } + return false +} + +func (x *SubsystemCli) GetCreatedWithoutKey() bool { + if x != nil && x.CreatedWithoutKey != nil { + return *x.CreatedWithoutKey + } + return false +} + +type GatewayInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CliVersion string `protobuf:"bytes,1,opt,name=cli_version,json=cliVersion,proto3" json:"cli_version,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Group string `protobuf:"bytes,4,opt,name=group,proto3" json:"group,omitempty"` + Addr string `protobuf:"bytes,5,opt,name=addr,proto3" json:"addr,omitempty"` + Port string `protobuf:"bytes,6,opt,name=port,proto3" json:"port,omitempty"` + BoolStatus bool `protobuf:"varint,7,opt,name=bool_status,json=boolStatus,proto3" json:"bool_status,omitempty"` + Status int32 `protobuf:"varint,8,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + SpdkVersion *string `protobuf:"bytes,10,opt,name=spdk_version,json=spdkVersion,proto3,oneof" json:"spdk_version,omitempty"` + LoadBalancingGroup uint32 `protobuf:"varint,11,opt,name=load_balancing_group,json=loadBalancingGroup,proto3" json:"load_balancing_group,omitempty"` + Hostname string `protobuf:"bytes,12,opt,name=hostname,proto3" json:"hostname,omitempty"` + MaxSubsystems *uint32 `protobuf:"varint,13,opt,name=max_subsystems,json=maxSubsystems,proto3,oneof" json:"max_subsystems,omitempty"` + MaxNamespaces *uint32 `protobuf:"varint,14,opt,name=max_namespaces,json=maxNamespaces,proto3,oneof" json:"max_namespaces,omitempty"` + MaxHostsPerSubsystem *uint32 `protobuf:"varint,15,opt,name=max_hosts_per_subsystem,json=maxHostsPerSubsystem,proto3,oneof" json:"max_hosts_per_subsystem,omitempty"` + MaxNamespacesPerSubsystem *uint32 `protobuf:"varint,16,opt,name=max_namespaces_per_subsystem,json=maxNamespacesPerSubsystem,proto3,oneof" json:"max_namespaces_per_subsystem,omitempty"` + MaxHosts *uint32 `protobuf:"varint,17,opt,name=max_hosts,json=maxHosts,proto3,oneof" 
json:"max_hosts,omitempty"` +} + +func (x *GatewayInfo) Reset() { + *x = GatewayInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GatewayInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GatewayInfo) ProtoMessage() {} + +func (x *GatewayInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GatewayInfo.ProtoReflect.Descriptor instead. +func (*GatewayInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{44} +} + +func (x *GatewayInfo) GetCliVersion() string { + if x != nil { + return x.CliVersion + } + return "" +} + +func (x *GatewayInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *GatewayInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GatewayInfo) GetGroup() string { + if x != nil { + return x.Group + } + return "" +} + +func (x *GatewayInfo) GetAddr() string { + if x != nil { + return x.Addr + } + return "" +} + +func (x *GatewayInfo) GetPort() string { + if x != nil { + return x.Port + } + return "" +} + +func (x *GatewayInfo) GetBoolStatus() bool { + if x != nil { + return x.BoolStatus + } + return false +} + +func (x *GatewayInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *GatewayInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *GatewayInfo) GetSpdkVersion() string { + if x != nil && x.SpdkVersion != nil { + return *x.SpdkVersion + } + return "" +} + +func (x *GatewayInfo) GetLoadBalancingGroup() uint32 { + if x != nil { + return x.LoadBalancingGroup + } + return 0 +} + +func (x *GatewayInfo) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *GatewayInfo) GetMaxSubsystems() uint32 { + if x != nil && x.MaxSubsystems != nil { + return *x.MaxSubsystems + } + return 0 +} + +func (x *GatewayInfo) GetMaxNamespaces() uint32 { + if x != nil && x.MaxNamespaces != nil { + return *x.MaxNamespaces + } + return 0 +} + +func (x *GatewayInfo) GetMaxHostsPerSubsystem() uint32 { + if x != nil && x.MaxHostsPerSubsystem != nil { + return *x.MaxHostsPerSubsystem + } + return 0 +} + +func (x *GatewayInfo) GetMaxNamespacesPerSubsystem() uint32 { + if x != nil && x.MaxNamespacesPerSubsystem != nil { + return *x.MaxNamespacesPerSubsystem + } + return 0 +} + +func (x *GatewayInfo) GetMaxHosts() uint32 { + if x != nil && x.MaxHosts != nil { + return *x.MaxHosts + } + return 0 +} + +type CliVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *CliVersion) Reset() { + *x = CliVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CliVersion) String() string { + 
return protoimpl.X.MessageStringOf(x) +} + +func (*CliVersion) ProtoMessage() {} + +func (x *CliVersion) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CliVersion.ProtoReflect.Descriptor instead. +func (*CliVersion) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{45} +} + +func (x *CliVersion) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *CliVersion) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *CliVersion) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +type GwVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *GwVersion) Reset() { + *x = GwVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GwVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GwVersion) ProtoMessage() {} + +func (x *GwVersion) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GwVersion.ProtoReflect.Descriptor instead. 
+func (*GwVersion) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{46} +} + +func (x *GwVersion) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *GwVersion) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *GwVersion) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +type ListenerInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + Trtype string `protobuf:"bytes,2,opt,name=trtype,proto3" json:"trtype,omitempty"` + Adrfam AddressFamily `protobuf:"varint,3,opt,name=adrfam,proto3,enum=AddressFamily" json:"adrfam,omitempty"` + Traddr string `protobuf:"bytes,4,opt,name=traddr,proto3" json:"traddr,omitempty"` + Trsvcid uint32 `protobuf:"varint,5,opt,name=trsvcid,proto3" json:"trsvcid,omitempty"` + Secure *bool `protobuf:"varint,6,opt,name=secure,proto3,oneof" json:"secure,omitempty"` +} + +func (x *ListenerInfo) Reset() { + *x = ListenerInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerInfo) ProtoMessage() {} + +func (x *ListenerInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerInfo.ProtoReflect.Descriptor instead. 
+func (*ListenerInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{47} +} + +func (x *ListenerInfo) GetHostName() string { + if x != nil { + return x.HostName + } + return "" +} + +func (x *ListenerInfo) GetTrtype() string { + if x != nil { + return x.Trtype + } + return "" +} + +func (x *ListenerInfo) GetAdrfam() AddressFamily { + if x != nil { + return x.Adrfam + } + return AddressFamily_ipv4 +} + +func (x *ListenerInfo) GetTraddr() string { + if x != nil { + return x.Traddr + } + return "" +} + +func (x *ListenerInfo) GetTrsvcid() uint32 { + if x != nil { + return x.Trsvcid + } + return 0 +} + +func (x *ListenerInfo) GetSecure() bool { + if x != nil && x.Secure != nil { + return *x.Secure + } + return false +} + +type ListenersInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Listeners []*ListenerInfo `protobuf:"bytes,3,rep,name=listeners,proto3" json:"listeners,omitempty"` +} + +func (x *ListenersInfo) Reset() { + *x = ListenersInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersInfo) ProtoMessage() {} + +func (x *ListenersInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersInfo.ProtoReflect.Descriptor instead. +func (*ListenersInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{48} +} + +func (x *ListenersInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *ListenersInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *ListenersInfo) GetListeners() []*ListenerInfo { + if x != nil { + return x.Listeners + } + return nil +} + +type GatewayListenerInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Listener *ListenerInfo `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"` + LbStates []*AnaGroupState `protobuf:"bytes,2,rep,name=lb_states,json=lbStates,proto3" json:"lb_states,omitempty"` +} + +func (x *GatewayListenerInfo) Reset() { + *x = GatewayListenerInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GatewayListenerInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GatewayListenerInfo) ProtoMessage() {} + +func (x *GatewayListenerInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GatewayListenerInfo.ProtoReflect.Descriptor instead. 
+func (*GatewayListenerInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{49} +} + +func (x *GatewayListenerInfo) GetListener() *ListenerInfo { + if x != nil { + return x.Listener + } + return nil +} + +func (x *GatewayListenerInfo) GetLbStates() []*AnaGroupState { + if x != nil { + return x.LbStates + } + return nil +} + +type GatewayListenersInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + GwListeners []*GatewayListenerInfo `protobuf:"bytes,3,rep,name=gw_listeners,json=gwListeners,proto3" json:"gw_listeners,omitempty"` +} + +func (x *GatewayListenersInfo) Reset() { + *x = GatewayListenersInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GatewayListenersInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GatewayListenersInfo) ProtoMessage() {} + +func (x *GatewayListenersInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GatewayListenersInfo.ProtoReflect.Descriptor instead. +func (*GatewayListenersInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{50} +} + +func (x *GatewayListenersInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *GatewayListenersInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *GatewayListenersInfo) GetGwListeners() []*GatewayListenerInfo { + if x != nil { + return x.GwListeners + } + return nil +} + +type Host struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nqn string `protobuf:"bytes,1,opt,name=nqn,proto3" json:"nqn,omitempty"` + UsePsk *bool `protobuf:"varint,2,opt,name=use_psk,json=usePsk,proto3,oneof" json:"use_psk,omitempty"` + UseDhchap *bool `protobuf:"varint,3,opt,name=use_dhchap,json=useDhchap,proto3,oneof" json:"use_dhchap,omitempty"` + DisconnectedDueToKeepaliveTimeout *bool `protobuf:"varint,4,opt,name=disconnected_due_to_keepalive_timeout,json=disconnectedDueToKeepaliveTimeout,proto3,oneof" json:"disconnected_due_to_keepalive_timeout,omitempty"` +} + +func (x *Host) Reset() { + *x = Host{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Host) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Host) ProtoMessage() {} + +func (x *Host) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Host.ProtoReflect.Descriptor instead. 
+func (*Host) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{51} +} + +func (x *Host) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +func (x *Host) GetUsePsk() bool { + if x != nil && x.UsePsk != nil { + return *x.UsePsk + } + return false +} + +func (x *Host) GetUseDhchap() bool { + if x != nil && x.UseDhchap != nil { + return *x.UseDhchap + } + return false +} + +func (x *Host) GetDisconnectedDueToKeepaliveTimeout() bool { + if x != nil && x.DisconnectedDueToKeepaliveTimeout != nil { + return *x.DisconnectedDueToKeepaliveTimeout + } + return false +} + +type HostsInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + AllowAnyHost bool `protobuf:"varint,3,opt,name=allow_any_host,json=allowAnyHost,proto3" json:"allow_any_host,omitempty"` + SubsystemNqn string `protobuf:"bytes,4,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Hosts []*Host `protobuf:"bytes,5,rep,name=hosts,proto3" json:"hosts,omitempty"` +} + +func (x *HostsInfo) Reset() { + *x = HostsInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HostsInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostsInfo) ProtoMessage() {} + +func (x *HostsInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostsInfo.ProtoReflect.Descriptor instead. 
+func (*HostsInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{52} +} + +func (x *HostsInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *HostsInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *HostsInfo) GetAllowAnyHost() bool { + if x != nil { + return x.AllowAnyHost + } + return false +} + +func (x *HostsInfo) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *HostsInfo) GetHosts() []*Host { + if x != nil { + return x.Hosts + } + return nil +} + +type Connection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nqn string `protobuf:"bytes,1,opt,name=nqn,proto3" json:"nqn,omitempty"` + Traddr string `protobuf:"bytes,2,opt,name=traddr,proto3" json:"traddr,omitempty"` + Trsvcid uint32 `protobuf:"varint,3,opt,name=trsvcid,proto3" json:"trsvcid,omitempty"` + Trtype string `protobuf:"bytes,4,opt,name=trtype,proto3" json:"trtype,omitempty"` + Adrfam AddressFamily `protobuf:"varint,5,opt,name=adrfam,proto3,enum=AddressFamily" json:"adrfam,omitempty"` + Connected bool `protobuf:"varint,6,opt,name=connected,proto3" json:"connected,omitempty"` + QpairsCount int32 `protobuf:"varint,7,opt,name=qpairs_count,json=qpairsCount,proto3" json:"qpairs_count,omitempty"` + ControllerId int32 `protobuf:"varint,8,opt,name=controller_id,json=controllerId,proto3" json:"controller_id,omitempty"` + Secure *bool `protobuf:"varint,9,opt,name=secure,proto3,oneof" json:"secure,omitempty"` + UsePsk *bool `protobuf:"varint,10,opt,name=use_psk,json=usePsk,proto3,oneof" json:"use_psk,omitempty"` + UseDhchap *bool `protobuf:"varint,11,opt,name=use_dhchap,json=useDhchap,proto3,oneof" json:"use_dhchap,omitempty"` + Subsystem *string `protobuf:"bytes,12,opt,name=subsystem,proto3,oneof" json:"subsystem,omitempty"` + DisconnectedDueToKeepaliveTimeout *bool `protobuf:"varint,13,opt,name=disconnected_due_to_keepalive_timeout,json=disconnectedDueToKeepaliveTimeout,proto3,oneof" json:"disconnected_due_to_keepalive_timeout,omitempty"` +} + +func (x *Connection) Reset() { + *x = Connection{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Connection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Connection) ProtoMessage() {} + +func (x *Connection) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Connection.ProtoReflect.Descriptor instead. 
+func (*Connection) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{53} +} + +func (x *Connection) GetNqn() string { + if x != nil { + return x.Nqn + } + return "" +} + +func (x *Connection) GetTraddr() string { + if x != nil { + return x.Traddr + } + return "" +} + +func (x *Connection) GetTrsvcid() uint32 { + if x != nil { + return x.Trsvcid + } + return 0 +} + +func (x *Connection) GetTrtype() string { + if x != nil { + return x.Trtype + } + return "" +} + +func (x *Connection) GetAdrfam() AddressFamily { + if x != nil { + return x.Adrfam + } + return AddressFamily_ipv4 +} + +func (x *Connection) GetConnected() bool { + if x != nil { + return x.Connected + } + return false +} + +func (x *Connection) GetQpairsCount() int32 { + if x != nil { + return x.QpairsCount + } + return 0 +} + +func (x *Connection) GetControllerId() int32 { + if x != nil { + return x.ControllerId + } + return 0 +} + +func (x *Connection) GetSecure() bool { + if x != nil && x.Secure != nil { + return *x.Secure + } + return false +} + +func (x *Connection) GetUsePsk() bool { + if x != nil && x.UsePsk != nil { + return *x.UsePsk + } + return false +} + +func (x *Connection) GetUseDhchap() bool { + if x != nil && x.UseDhchap != nil { + return *x.UseDhchap + } + return false +} + +func (x *Connection) GetSubsystem() string { + if x != nil && x.Subsystem != nil { + return *x.Subsystem + } + return "" +} + +func (x *Connection) GetDisconnectedDueToKeepaliveTimeout() bool { + if x != nil && x.DisconnectedDueToKeepaliveTimeout != nil { + return *x.DisconnectedDueToKeepaliveTimeout + } + return false +} + +type ConnectionsInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + SubsystemNqn string `protobuf:"bytes,3,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Connections []*Connection `protobuf:"bytes,4,rep,name=connections,proto3" json:"connections,omitempty"` +} + +func (x *ConnectionsInfo) Reset() { + *x = ConnectionsInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectionsInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectionsInfo) ProtoMessage() {} + +func (x *ConnectionsInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectionsInfo.ProtoReflect.Descriptor instead. 
+func (*ConnectionsInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{54} +} + +func (x *ConnectionsInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *ConnectionsInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *ConnectionsInfo) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *ConnectionsInfo) GetConnections() []*Connection { + if x != nil { + return x.Connections + } + return nil +} + +type NamespaceCli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nsid uint32 `protobuf:"varint,1,opt,name=nsid,proto3" json:"nsid,omitempty"` + BdevName string `protobuf:"bytes,2,opt,name=bdev_name,json=bdevName,proto3" json:"bdev_name,omitempty"` + RbdImageName string `protobuf:"bytes,3,opt,name=rbd_image_name,json=rbdImageName,proto3" json:"rbd_image_name,omitempty"` + RbdPoolName string `protobuf:"bytes,4,opt,name=rbd_pool_name,json=rbdPoolName,proto3" json:"rbd_pool_name,omitempty"` + LoadBalancingGroup uint32 `protobuf:"varint,5,opt,name=load_balancing_group,json=loadBalancingGroup,proto3" json:"load_balancing_group,omitempty"` + BlockSize uint32 `protobuf:"varint,6,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` + RbdImageSize uint64 `protobuf:"varint,7,opt,name=rbd_image_size,json=rbdImageSize,proto3" json:"rbd_image_size,omitempty"` + Uuid string `protobuf:"bytes,8,opt,name=uuid,proto3" json:"uuid,omitempty"` + RwIosPerSecond uint64 `protobuf:"varint,9,opt,name=rw_ios_per_second,json=rwIosPerSecond,proto3" json:"rw_ios_per_second,omitempty"` + RwMbytesPerSecond uint64 `protobuf:"varint,10,opt,name=rw_mbytes_per_second,json=rwMbytesPerSecond,proto3" json:"rw_mbytes_per_second,omitempty"` + RMbytesPerSecond uint64 `protobuf:"varint,11,opt,name=r_mbytes_per_second,json=rMbytesPerSecond,proto3" json:"r_mbytes_per_second,omitempty"` + WMbytesPerSecond uint64 `protobuf:"varint,12,opt,name=w_mbytes_per_second,json=wMbytesPerSecond,proto3" json:"w_mbytes_per_second,omitempty"` + AutoVisible bool `protobuf:"varint,13,opt,name=auto_visible,json=autoVisible,proto3" json:"auto_visible,omitempty"` + Hosts []string `protobuf:"bytes,14,rep,name=hosts,proto3" json:"hosts,omitempty"` + NsSubsystemNqn *string `protobuf:"bytes,15,opt,name=ns_subsystem_nqn,json=nsSubsystemNqn,proto3,oneof" json:"ns_subsystem_nqn,omitempty"` + TrashImage *bool `protobuf:"varint,16,opt,name=trash_image,json=trashImage,proto3,oneof" json:"trash_image,omitempty"` + DisableAutoResize *bool `protobuf:"varint,17,opt,name=disable_auto_resize,json=disableAutoResize,proto3,oneof" json:"disable_auto_resize,omitempty"` + ReadOnly *bool `protobuf:"varint,18,opt,name=read_only,json=readOnly,proto3,oneof" json:"read_only,omitempty"` +} + +func (x *NamespaceCli) Reset() { + *x = NamespaceCli{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceCli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceCli) ProtoMessage() {} + +func (x *NamespaceCli) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} 
+ +// Deprecated: Use NamespaceCli.ProtoReflect.Descriptor instead. +func (*NamespaceCli) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{55} +} + +func (x *NamespaceCli) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceCli) GetBdevName() string { + if x != nil { + return x.BdevName + } + return "" +} + +func (x *NamespaceCli) GetRbdImageName() string { + if x != nil { + return x.RbdImageName + } + return "" +} + +func (x *NamespaceCli) GetRbdPoolName() string { + if x != nil { + return x.RbdPoolName + } + return "" +} + +func (x *NamespaceCli) GetLoadBalancingGroup() uint32 { + if x != nil { + return x.LoadBalancingGroup + } + return 0 +} + +func (x *NamespaceCli) GetBlockSize() uint32 { + if x != nil { + return x.BlockSize + } + return 0 +} + +func (x *NamespaceCli) GetRbdImageSize() uint64 { + if x != nil { + return x.RbdImageSize + } + return 0 +} + +func (x *NamespaceCli) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *NamespaceCli) GetRwIosPerSecond() uint64 { + if x != nil { + return x.RwIosPerSecond + } + return 0 +} + +func (x *NamespaceCli) GetRwMbytesPerSecond() uint64 { + if x != nil { + return x.RwMbytesPerSecond + } + return 0 +} + +func (x *NamespaceCli) GetRMbytesPerSecond() uint64 { + if x != nil { + return x.RMbytesPerSecond + } + return 0 +} + +func (x *NamespaceCli) GetWMbytesPerSecond() uint64 { + if x != nil { + return x.WMbytesPerSecond + } + return 0 +} + +func (x *NamespaceCli) GetAutoVisible() bool { + if x != nil { + return x.AutoVisible + } + return false +} + +func (x *NamespaceCli) GetHosts() []string { + if x != nil { + return x.Hosts + } + return nil +} + +func (x *NamespaceCli) GetNsSubsystemNqn() string { + if x != nil && x.NsSubsystemNqn != nil { + return *x.NsSubsystemNqn + } + return "" +} + +func (x *NamespaceCli) GetTrashImage() bool { + if x != nil && x.TrashImage != nil { + return *x.TrashImage + } + return false +} + +func (x *NamespaceCli) GetDisableAutoResize() bool { + if x != nil && x.DisableAutoResize != nil { + return *x.DisableAutoResize + } + return false +} + +func (x *NamespaceCli) GetReadOnly() bool { + if x != nil && x.ReadOnly != nil { + return *x.ReadOnly + } + return false +} + +type NamespacesInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + SubsystemNqn string `protobuf:"bytes,3,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Namespaces []*NamespaceCli `protobuf:"bytes,4,rep,name=namespaces,proto3" json:"namespaces,omitempty"` +} + +func (x *NamespacesInfo) Reset() { + *x = NamespacesInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespacesInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespacesInfo) ProtoMessage() {} + +func (x *NamespacesInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
NamespacesInfo.ProtoReflect.Descriptor instead. +func (*NamespacesInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{56} +} + +func (x *NamespacesInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *NamespacesInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *NamespacesInfo) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespacesInfo) GetNamespaces() []*NamespaceCli { + if x != nil { + return x.Namespaces + } + return nil +} + +type NamespaceIoError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value uint32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamespaceIoError) Reset() { + *x = NamespaceIoError{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceIoError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceIoError) ProtoMessage() {} + +func (x *NamespaceIoError) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceIoError.ProtoReflect.Descriptor instead. +func (*NamespaceIoError) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{57} +} + +func (x *NamespaceIoError) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamespaceIoError) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +type NamespaceIoStatsInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + SubsystemNqn string `protobuf:"bytes,3,opt,name=subsystem_nqn,json=subsystemNqn,proto3" json:"subsystem_nqn,omitempty"` + Nsid uint32 `protobuf:"varint,4,opt,name=nsid,proto3" json:"nsid,omitempty"` + Uuid *string `protobuf:"bytes,5,opt,name=uuid,proto3,oneof" json:"uuid,omitempty"` + BdevName string `protobuf:"bytes,6,opt,name=bdev_name,json=bdevName,proto3" json:"bdev_name,omitempty"` + TickRate uint64 `protobuf:"varint,7,opt,name=tick_rate,json=tickRate,proto3" json:"tick_rate,omitempty"` + Ticks uint64 `protobuf:"varint,8,opt,name=ticks,proto3" json:"ticks,omitempty"` + BytesRead uint64 `protobuf:"varint,9,opt,name=bytes_read,json=bytesRead,proto3" json:"bytes_read,omitempty"` + NumReadOps uint64 `protobuf:"varint,10,opt,name=num_read_ops,json=numReadOps,proto3" json:"num_read_ops,omitempty"` + BytesWritten uint64 `protobuf:"varint,11,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"` + NumWriteOps uint64 `protobuf:"varint,12,opt,name=num_write_ops,json=numWriteOps,proto3" json:"num_write_ops,omitempty"` + BytesUnmapped uint64 `protobuf:"varint,13,opt,name=bytes_unmapped,json=bytesUnmapped,proto3" json:"bytes_unmapped,omitempty"` + NumUnmapOps uint64 
`protobuf:"varint,14,opt,name=num_unmap_ops,json=numUnmapOps,proto3" json:"num_unmap_ops,omitempty"` + ReadLatencyTicks uint64 `protobuf:"varint,15,opt,name=read_latency_ticks,json=readLatencyTicks,proto3" json:"read_latency_ticks,omitempty"` + MaxReadLatencyTicks uint64 `protobuf:"varint,16,opt,name=max_read_latency_ticks,json=maxReadLatencyTicks,proto3" json:"max_read_latency_ticks,omitempty"` + MinReadLatencyTicks uint64 `protobuf:"varint,17,opt,name=min_read_latency_ticks,json=minReadLatencyTicks,proto3" json:"min_read_latency_ticks,omitempty"` + WriteLatencyTicks uint64 `protobuf:"varint,18,opt,name=write_latency_ticks,json=writeLatencyTicks,proto3" json:"write_latency_ticks,omitempty"` + MaxWriteLatencyTicks uint64 `protobuf:"varint,19,opt,name=max_write_latency_ticks,json=maxWriteLatencyTicks,proto3" json:"max_write_latency_ticks,omitempty"` + MinWriteLatencyTicks uint64 `protobuf:"varint,20,opt,name=min_write_latency_ticks,json=minWriteLatencyTicks,proto3" json:"min_write_latency_ticks,omitempty"` + UnmapLatencyTicks uint64 `protobuf:"varint,21,opt,name=unmap_latency_ticks,json=unmapLatencyTicks,proto3" json:"unmap_latency_ticks,omitempty"` + MaxUnmapLatencyTicks uint64 `protobuf:"varint,22,opt,name=max_unmap_latency_ticks,json=maxUnmapLatencyTicks,proto3" json:"max_unmap_latency_ticks,omitempty"` + MinUnmapLatencyTicks uint64 `protobuf:"varint,23,opt,name=min_unmap_latency_ticks,json=minUnmapLatencyTicks,proto3" json:"min_unmap_latency_ticks,omitempty"` + CopyLatencyTicks uint64 `protobuf:"varint,24,opt,name=copy_latency_ticks,json=copyLatencyTicks,proto3" json:"copy_latency_ticks,omitempty"` + MaxCopyLatencyTicks uint64 `protobuf:"varint,25,opt,name=max_copy_latency_ticks,json=maxCopyLatencyTicks,proto3" json:"max_copy_latency_ticks,omitempty"` + MinCopyLatencyTicks uint64 `protobuf:"varint,26,opt,name=min_copy_latency_ticks,json=minCopyLatencyTicks,proto3" json:"min_copy_latency_ticks,omitempty"` + IoError []*NamespaceIoError `protobuf:"bytes,27,rep,name=io_error,json=ioError,proto3" json:"io_error,omitempty"` +} + +func (x *NamespaceIoStatsInfo) Reset() { + *x = NamespaceIoStatsInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceIoStatsInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamespaceIoStatsInfo) ProtoMessage() {} + +func (x *NamespaceIoStatsInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceIoStatsInfo.ProtoReflect.Descriptor instead. 
+func (*NamespaceIoStatsInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{58} +} + +func (x *NamespaceIoStatsInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *NamespaceIoStatsInfo) GetSubsystemNqn() string { + if x != nil { + return x.SubsystemNqn + } + return "" +} + +func (x *NamespaceIoStatsInfo) GetNsid() uint32 { + if x != nil { + return x.Nsid + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetUuid() string { + if x != nil && x.Uuid != nil { + return *x.Uuid + } + return "" +} + +func (x *NamespaceIoStatsInfo) GetBdevName() string { + if x != nil { + return x.BdevName + } + return "" +} + +func (x *NamespaceIoStatsInfo) GetTickRate() uint64 { + if x != nil { + return x.TickRate + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetTicks() uint64 { + if x != nil { + return x.Ticks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetBytesRead() uint64 { + if x != nil { + return x.BytesRead + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetNumReadOps() uint64 { + if x != nil { + return x.NumReadOps + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetBytesWritten() uint64 { + if x != nil { + return x.BytesWritten + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetNumWriteOps() uint64 { + if x != nil { + return x.NumWriteOps + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetBytesUnmapped() uint64 { + if x != nil { + return x.BytesUnmapped + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetNumUnmapOps() uint64 { + if x != nil { + return x.NumUnmapOps + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetReadLatencyTicks() uint64 { + if x != nil { + return x.ReadLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMaxReadLatencyTicks() uint64 { + if x != nil { + return x.MaxReadLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMinReadLatencyTicks() uint64 { + if x != nil { + return x.MinReadLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetWriteLatencyTicks() uint64 { + if x != nil { + return x.WriteLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMaxWriteLatencyTicks() uint64 { + if x != nil { + return x.MaxWriteLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMinWriteLatencyTicks() uint64 { + if x != nil { + return x.MinWriteLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetUnmapLatencyTicks() uint64 { + if x != nil { + return x.UnmapLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMaxUnmapLatencyTicks() uint64 { + if x != nil { + return x.MaxUnmapLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMinUnmapLatencyTicks() uint64 { + if x != nil { + return x.MinUnmapLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetCopyLatencyTicks() uint64 { + if x != nil { + return x.CopyLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMaxCopyLatencyTicks() uint64 { + if x != nil { + return x.MaxCopyLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetMinCopyLatencyTicks() uint64 { + if x != nil { + return x.MinCopyLatencyTicks + } + return 0 +} + +func (x *NamespaceIoStatsInfo) GetIoError() []*NamespaceIoError { + if x != nil { + return x.IoError + } + return nil +} + +type SpdkLogFlagInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *SpdkLogFlagInfo) Reset() { + *x = SpdkLogFlagInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpdkLogFlagInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpdkLogFlagInfo) ProtoMessage() {} + +func (x *SpdkLogFlagInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpdkLogFlagInfo.ProtoReflect.Descriptor instead. +func (*SpdkLogFlagInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{59} +} + +func (x *SpdkLogFlagInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SpdkLogFlagInfo) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +type SpdkNvmfLogFlagsAndLevelInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + NvmfLogFlags []*SpdkLogFlagInfo `protobuf:"bytes,3,rep,name=nvmf_log_flags,json=nvmfLogFlags,proto3" json:"nvmf_log_flags,omitempty"` + LogLevel LogLevel `protobuf:"varint,4,opt,name=log_level,json=logLevel,proto3,enum=LogLevel" json:"log_level,omitempty"` + LogPrintLevel LogLevel `protobuf:"varint,5,opt,name=log_print_level,json=logPrintLevel,proto3,enum=LogLevel" json:"log_print_level,omitempty"` +} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) Reset() { + *x = SpdkNvmfLogFlagsAndLevelInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpdkNvmfLogFlagsAndLevelInfo) ProtoMessage() {} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpdkNvmfLogFlagsAndLevelInfo.ProtoReflect.Descriptor instead. 
+func (*SpdkNvmfLogFlagsAndLevelInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{60} +} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) GetNvmfLogFlags() []*SpdkLogFlagInfo { + if x != nil { + return x.NvmfLogFlags + } + return nil +} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) GetLogLevel() LogLevel { + if x != nil { + return x.LogLevel + } + return LogLevel_ERROR +} + +func (x *SpdkNvmfLogFlagsAndLevelInfo) GetLogPrintLevel() LogLevel { + if x != nil { + return x.LogPrintLevel + } + return LogLevel_ERROR +} + +type GatewayLogLevelInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + LogLevel GwLogLevel `protobuf:"varint,3,opt,name=log_level,json=logLevel,proto3,enum=GwLogLevel" json:"log_level,omitempty"` +} + +func (x *GatewayLogLevelInfo) Reset() { + *x = GatewayLogLevelInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateway_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GatewayLogLevelInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GatewayLogLevelInfo) ProtoMessage() {} + +func (x *GatewayLogLevelInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateway_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GatewayLogLevelInfo.ProtoReflect.Descriptor instead. 
+func (*GatewayLogLevelInfo) Descriptor() ([]byte, []int) { + return file_gateway_proto_rawDescGZIP(), []int{61} +} + +func (x *GatewayLogLevelInfo) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *GatewayLogLevelInfo) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *GatewayLogLevelInfo) GetLogLevel() GwLogLevel { + if x != nil { + return x.LogLevel + } + return GwLogLevel_notset +} + +var File_gateway_proto protoreflect.FileDescriptor + +var file_gateway_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x87, 0x05, 0x0a, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x62, 0x64, 0x5f, 0x70, 0x6f, 0x6f, + 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x62, + 0x64, 0x50, 0x6f, 0x6f, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x62, 0x64, + 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x62, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, + 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, + 0x75, 0x75, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x04, 0x75, 0x75, + 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x61, 0x6e, 0x61, 0x67, 0x72, 0x70, 0x69, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x08, 0x61, 0x6e, 0x61, 0x67, 0x72, + 0x70, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x48, 0x03, 0x52, 0x0b, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x17, + 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x48, 0x04, 0x52, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x05, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x88, + 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x6f, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x76, 0x69, + 0x73, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x06, 0x52, 0x0d, 0x6e, + 0x6f, 0x41, 0x75, 0x74, 0x6f, 0x56, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x24, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x07, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x73, 0x68, 0x49, 0x6d, 0x61, + 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x08, 0x52, 0x11, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, + 0x6f, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 
0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x48, 0x09, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, + 0x5f, 0x6e, 0x73, 0x69, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x61, 0x6e, 0x61, 0x67, 0x72, 0x70, 0x69, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x42, 0x07, 0x0a, 0x05, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x42, + 0x12, 0x0a, 0x10, 0x5f, 0x6e, 0x6f, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x76, 0x69, 0x73, 0x69, + 0x62, 0x6c, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x74, 0x72, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x22, 0xa6, 0x01, 0x0a, 0x14, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, + 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x4f, + 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x55, 0x75, + 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x53, 0x69, 0x7a, 0x65, + 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, + 0x69, 0x64, 0x22, 0x91, 0x01, 0x0a, 0x1a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x72, 0x65, + 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, + 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x4f, 0x42, + 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x0c, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x55, 0x75, 0x69, + 0x64, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, + 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x22, 0xde, 0x03, 0x0a, 0x15, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x71, 0x6f, 0x73, 0x5f, 0x72, 0x65, 0x71, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, 0x28, 
0x0a, 0x0d, 0x4f, 0x42, 0x53, + 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x0c, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x55, 0x75, 0x69, 0x64, + 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x11, 0x72, 0x77, 0x5f, 0x69, 0x6f, 0x73, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, + 0x52, 0x0e, 0x72, 0x77, 0x49, 0x6f, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x14, 0x72, 0x77, 0x5f, 0x6d, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x48, 0x02, 0x52, 0x11, 0x72, 0x77, 0x4d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, + 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x13, 0x72, 0x5f, 0x6d, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x48, 0x03, 0x52, 0x10, 0x72, 0x4d, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, + 0x13, 0x77, 0x5f, 0x6d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x48, 0x04, 0x52, 0x10, 0x77, 0x4d, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x88, 0x01, + 0x01, 0x12, 0x19, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x05, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, + 0x5f, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x42, 0x14, + 0x0a, 0x12, 0x5f, 0x72, 0x77, 0x5f, 0x69, 0x6f, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x72, 0x77, 0x5f, 0x6d, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x42, 0x16, 0x0a, + 0x14, 0x5f, 0x72, 0x5f, 0x6d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x77, 0x5f, 0x6d, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x42, 0x08, 0x0a, + 0x06, 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0xf7, 0x01, 0x0a, 0x29, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, 0x28, + 0x0a, 0x0d, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, + 0x45, 0x55, 0x75, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x6e, 0x61, 0x67, + 0x72, 0x70, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x61, 0x6e, 0x61, 0x67, + 0x72, 0x70, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x6c, 0x62, 
0x5f, + 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0b, 0x61, + 0x75, 0x74, 0x6f, 0x4c, 0x62, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, + 0x0e, 0x5f, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x42, + 0x10, 0x0a, 0x0e, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x6c, 0x62, 0x5f, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x22, 0xa2, 0x01, 0x0a, 0x1f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x56, 0x69, 0x73, 0x69, 0x62, 0x6c, + 0x65, 0x12, 0x19, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x00, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, + 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x7d, 0x0a, 0x21, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x62, 0x64, 0x5f, 0x74, 0x72, 0x61, 0x73, + 0x68, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, + 0x6e, 0x73, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x73, 0x68, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x22, 0x79, 0x0a, 0x1d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x69, + 0x7a, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, + 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, + 0x1f, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, + 0x22, 0xba, 0x01, 0x0a, 0x14, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, + 0x69, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, + 0x75, 0x69, 0x64, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x4f, 0x42, 0x53, + 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x55, 0x75, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x09, + 0x69, 0x5f, 0x61, 0x6d, 0x5f, 0x73, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x01, 0x52, 0x07, 0x69, 0x41, 0x6d, 0x53, 0x75, 0x72, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, + 0x0e, 0x5f, 0x4f, 0x42, 0x53, 0x4f, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x42, + 0x0c, 0x0a, 0x0a, 0x5f, 0x69, 0x5f, 0x61, 0x6d, 0x5f, 0x73, 0x75, 0x72, 0x65, 0x22, 0x91, 0x01, + 0x0a, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x71, 0x6e, 0x12, 0x19, 0x0a, 0x05, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x05, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x66, 0x6f, 0x72, 0x63, + 0x65, 0x22, 0x6f, 0x0a, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x6e, 0x71, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x4e, + 0x71, 0x6e, 0x22, 0xec, 0x02, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, + 0x0d, 0x6d, 0x61, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x61, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x48, 0x61, 0x12, 0x2b, + 0x0a, 0x0f, 0x6e, 0x6f, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0d, 0x6e, 0x6f, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x64, + 0x68, 0x63, 0x68, 0x61, 0x70, 0x5f, 0x6b, 
0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x02, 0x52, 0x09, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, + 0x28, 0x0a, 0x0d, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x48, 0x03, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6d, 0x61, + 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x42, 0x12, 0x0a, 0x10, + 0x5f, 0x6e, 0x6f, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x42, + 0x10, 0x0a, 0x0e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x22, 0x60, 0x0a, 0x14, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x19, + 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x22, 0x72, 0x0a, 0x18, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x22, 0x0a, 0x0a, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x64, 0x68, 0x63, 0x68, + 0x61, 0x70, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x68, 0x63, + 0x68, 0x61, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x77, 0x0a, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x1c, + 0x0a, 0x09, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x12, 0x17, 0x0a, 0x04, + 0x6e, 0x73, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x73, + 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x88, 0x01, 0x01, 0x42, 0x07, + 0x0a, 0x05, 0x5f, 0x6e, 0x73, 0x69, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x75, 0x75, 0x69, 0x64, + 0x22, 0x98, 0x02, 0x0a, 0x0c, 0x61, 0x64, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, + 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, + 0x71, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x71, + 0x6e, 0x12, 0x15, 0x0a, 0x03, 0x70, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x03, 0x70, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 
0x0a, 0x64, 0x68, 0x63, 0x68, + 0x61, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, + 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, + 0x70, 0x73, 0x6b, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x0c, 0x70, 0x73, 0x6b, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x03, 0x52, + 0x0c, 0x6b, 0x65, 0x79, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, + 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x70, 0x73, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x68, 0x63, + 0x68, 0x61, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x70, 0x73, 0x6b, 0x5f, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x22, 0x88, 0x01, 0x0a, 0x13, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, + 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x68, 0x6f, 0x73, 0x74, + 0x4e, 0x71, 0x6e, 0x12, 0x22, 0x0a, 0x0a, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x64, 0x68, 0x63, 0x68, 0x61, + 0x70, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x68, 0x63, 0x68, + 0x61, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x51, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x71, 0x6e, 0x22, 0x67, 0x0a, 0x0e, 0x6c, 0x69, 0x73, + 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x73, + 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x12, 0x26, 0x0a, 0x0c, 0x63, 0x6c, 0x65, + 0x61, 0x72, 0x5f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x65, 0x61, 0x72, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x73, 0x88, 0x01, + 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x63, 0x6c, 0x65, 0x61, 0x72, 0x5f, 0x61, 0x6c, 0x65, 0x72, + 0x74, 0x73, 0x22, 0x34, 0x0a, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xab, 0x02, 0x0a, 0x13, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x72, 0x65, 
0x71, + 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6e, + 0x71, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x12, 0x2b, 0x0a, 0x06, 0x61, 0x64, 0x72, 0x66, 0x61, + 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x61, 0x64, 0x72, 0x66, 0x61, + 0x6d, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x74, 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x07, 0x74, 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, + 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x06, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65, 0x88, 0x01, 0x01, + 0x12, 0x2d, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x48, 0x03, 0x52, 0x0e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x61, 0x64, 0x72, 0x66, 0x61, 0x6d, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, + 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, + 0x65, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xe4, 0x01, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x10, + 0x0a, 0x03, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6e, 0x71, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x72, 0x61, 0x64, 0x64, 0x72, 0x12, 0x2b, 0x0a, 0x06, 0x61, 0x64, 0x72, 0x66, 0x61, 0x6d, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x61, 0x64, 0x72, 0x66, 0x61, 0x6d, 0x88, + 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x74, 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x07, 0x74, 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, 0x88, 0x01, + 0x01, 0x12, 0x19, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x02, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, + 0x5f, 0x61, 0x64, 0x72, 0x66, 0x61, 0x6d, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x72, 0x73, 0x76, + 0x63, 0x69, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x32, 0x0a, + 0x12, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, + 0x72, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, + 0x73, 0x74, 0x65, 
0x6d, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x75, 0x62, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, + 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x0c, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, + 0x0e, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x42, + 0x10, 0x0a, 0x0e, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x22, 0x14, 0x0a, 0x12, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x22, 0x62, 0x0a, 0x25, 0x67, 0x65, 0x74, 0x5f, 0x73, + 0x70, 0x64, 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, + 0x67, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, + 0x12, 0x27, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x6c, 0x6c, 0x4c, 0x6f, + 0x67, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x61, 0x6c, + 0x6c, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x44, 0x0a, 0x1a, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x70, 0x64, 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, + 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x72, 0x61, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x61, 0x67, + 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x16, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x70, 0x64, 0x6b, 0x5f, 0x6e, + 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x2b, 0x0a, 0x09, + 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x09, 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x6f, + 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x0b, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, + 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x72, 0x61, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x22, 0x4c, 0x0a, 0x14, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0a, 0x63, 0x6c, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0e, + 0x0a, 0x0c, 0x5f, 0x63, 0x6c, 0x69, 0x5f, 
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1b, + 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x22, 0x45, 0x0a, 0x19, 0x73, + 0x65, 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x28, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x47, 0x77, + 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x22, 0x46, 0x0a, 0x1f, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x22, 0x4a, 0x0a, 0x0f, 0x61, 0x6e, + 0x61, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, + 0x06, 0x67, 0x72, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x67, + 0x72, 0x70, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x61, 0x6e, 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x4c, 0x0a, 0x0e, 0x6e, 0x71, 0x6e, 0x5f, 0x61, 0x6e, + 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x71, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6e, 0x71, 0x6e, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x61, 0x6e, 0x61, + 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x73, 0x22, 0x33, 0x0a, 0x08, 0x61, 0x6e, 0x61, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x27, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6e, 0x71, 0x6e, 0x5f, 0x61, 0x6e, 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x22, 0x49, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x5e, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x71, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6e, 0x71, 0x6e, 0x22, 0x5e, 0x0a, 0x0b, 0x6e, 0x73, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, + 0x6e, 0x73, 0x69, 0x64, 0x22, 0x3d, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x2a, 0x0a, 0x0a, 0x73, 0x75, 0x62, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x73, 0x22, 0xba, 0x04, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6e, 0x71, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3a, 0x0a, + 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x05, 0x68, 0x6f, 0x73, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x68, 0x6f, 0x73, 0x74, 0x52, + 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, + 0x61, 0x6e, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0d, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x0b, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2a, + 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x69, + 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x03, + 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x43, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x22, + 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0d, 0x48, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x43, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x88, + 0x01, 0x01, 0x12, 0x2a, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, + 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x5f, 0x6b, 0x65, 
0x79, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x05, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x44, 0x68, 0x63, + 0x68, 0x61, 0x70, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x11, 0x0a, 0x0f, + 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x42, + 0x0d, 0x0a, 0x0b, 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x42, 0x0d, + 0x0a, 0x0b, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x42, 0x11, 0x0a, + 0x0f, 0x5f, 0x68, 0x61, 0x73, 0x5f, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x5f, 0x6b, 0x65, 0x79, + 0x22, 0xcb, 0x01, 0x0a, 0x0e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x61, + 0x64, 0x72, 0x66, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x64, 0x72, + 0x66, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x74, + 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, + 0x73, 0x76, 0x63, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x09, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x65, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65, 0x22, 0xcc, + 0x02, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x73, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x64, 0x65, 0x76, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x62, 0x64, 0x65, 0x76, 0x4e, + 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6e, 0x67, 0x75, 0x69, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x05, 0x6e, 0x67, 0x75, 0x69, 0x64, 0x88, 0x01, + 0x01, 0x12, 0x17, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x02, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x61, 0x6e, + 0x61, 0x67, 0x72, 0x70, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x03, 0x52, 0x08, + 0x61, 0x6e, 0x61, 0x67, 0x72, 0x70, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6e, + 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, 0x05, 0x6e, 0x6f, + 0x6e, 0x63, 0x65, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x76, + 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x48, 0x05, 0x52, 0x0b, + 0x61, 0x75, 0x74, 0x6f, 0x56, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x14, + 0x0a, 0x05, 0x68, 
0x6f, 0x73, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x68, + 0x6f, 0x73, 0x74, 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x62, 0x64, 0x65, 0x76, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6e, 0x67, 0x75, 0x69, 0x64, 0x42, 0x07, 0x0a, 0x05, + 0x5f, 0x75, 0x75, 0x69, 0x64, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x6e, 0x61, 0x67, 0x72, 0x70, + 0x69, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x42, 0x0f, 0x0a, 0x0d, + 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x22, 0x82, 0x01, + 0x0a, 0x13, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x5f, 0x63, 0x6c, 0x69, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x0a, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x5f, 0x63, 0x6c, 0x69, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x73, 0x22, 0xf7, 0x03, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x5f, 0x63, 0x6c, 0x69, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6e, 0x71, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x68, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x48, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x6f, 0x64, 0x65, + 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x69, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x6d, 0x69, 0x6e, 0x43, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, + 0x78, 0x5f, 0x63, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, + 0x6d, 0x61, 0x78, 0x43, 0x6e, 0x74, 0x6c, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x74, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x64, 0x68, 0x63, 0x68, 0x61, + 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x68, + 0x61, 0x73, 0x44, 0x68, 0x63, 0x68, 0x61, 0x70, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x29, + 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 
0x5f, 0x61, 0x6e, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, + 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x42, 0x11, + 0x0a, 0x0f, 0x5f, 0x68, 0x61, 0x73, 0x5f, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x5f, 0x6b, 0x65, + 0x79, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6e, 0x79, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0xed, 0x05, 0x0a, + 0x0c, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x6c, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6f, + 0x6f, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x70, 0x64, 0x6b, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x70, 0x64, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, + 0x12, 0x30, 0x0a, 0x14, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, + 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, + 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x53, 0x75, 0x62, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73, 0x88, 0x01, 0x01, 0x12, 0x2a, 0x0a, 0x0e, 0x6d, 0x61, + 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 
0x73, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x0d, 0x48, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x88, + 0x01, 0x01, 0x12, 0x44, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x04, 0x52, 0x19, 0x6d, 0x61, 0x78, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x05, 0x52, 0x08, 0x6d, + 0x61, 0x78, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, + 0x70, 0x64, 0x6b, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73, 0x42, 0x11, + 0x0a, 0x0f, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x42, 0x1f, 0x0a, + 0x1d, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x22, 0x64, 0x0a, 0x0b, + 0x63, 0x6c, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x63, 0x0a, 0x0a, 0x67, 0x77, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xc6, 0x01, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x6f, 0x73, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, + 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x74, 0x79, 0x70, 
0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26, + 0x0a, 0x06, 0x61, 0x64, 0x72, 0x66, 0x61, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, + 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x06, + 0x61, 0x64, 0x72, 0x66, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x12, 0x18, + 0x0a, 0x07, 0x74, 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x74, 0x72, 0x73, 0x76, 0x63, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65, + 0x22, 0x7b, 0x0a, 0x0e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x2c, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x22, 0x72, 0x0a, + 0x15, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x2a, 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x61, 0x6e, 0x61, 0x5f, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x6c, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x73, 0x22, 0x90, 0x01, 0x0a, 0x16, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x67, 0x77, 0x5f, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x67, 0x77, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x73, 0x22, 0xf6, 0x01, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6e, 0x71, 0x6e, 0x12, + 0x1c, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x00, 0x52, 
0x06, 0x75, 0x73, 0x65, 0x50, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, + 0x0a, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x01, 0x52, 0x09, 0x75, 0x73, 0x65, 0x44, 0x68, 0x63, 0x68, 0x61, 0x70, 0x88, 0x01, + 0x01, 0x12, 0x55, 0x0a, 0x25, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x5f, 0x64, 0x75, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, + 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x02, 0x52, 0x21, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x44, 0x75, 0x65, 0x54, 0x6f, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x75, 0x73, 0x65, + 0x5f, 0x70, 0x73, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x68, 0x63, + 0x68, 0x61, 0x70, 0x42, 0x28, 0x0a, 0x26, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x75, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xb1, 0x01, + 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6e, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x1b, 0x0a, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x05, 0x68, 0x6f, 0x73, 0x74, + 0x73, 0x22, 0xad, 0x04, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x71, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6e, + 0x71, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x61, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, + 0x73, 0x76, 0x63, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x74, 0x72, 0x73, + 0x76, 0x63, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x06, + 0x61, 0x64, 0x72, 0x66, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x06, 0x61, 0x64, + 0x72, 0x66, 0x61, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x71, 0x70, 0x61, 0x69, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 
0x05, 0x52, 0x0b, 0x71, 0x70, 0x61, 0x69, 0x72, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1c, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x70, + 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x50, + 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x68, 0x63, + 0x68, 0x61, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x09, 0x75, 0x73, 0x65, + 0x44, 0x68, 0x63, 0x68, 0x61, 0x70, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x75, 0x62, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x09, + 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x55, 0x0a, 0x25, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x75, 0x65, + 0x5f, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x21, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x75, 0x65, 0x54, 0x6f, + 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65, 0x42, 0x0a, + 0x0a, 0x08, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x73, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, + 0x73, 0x65, 0x5f, 0x64, 0x68, 0x63, 0x68, 0x61, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x42, 0x28, 0x0a, 0x26, 0x5f, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x75, 0x65, 0x5f, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x22, 0xa3, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x2d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xff, 0x05, 0x0a, 0x0d, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x69, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x73, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x62, 0x64, 0x65, 0x76, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x62, 0x64, 0x65, 0x76, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x62, + 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x62, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x62, 0x64, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x62, 0x64, 0x50, 0x6f, 0x6f, 0x6c, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x62, 0x64, 0x5f, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, + 0x62, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, + 0x29, 0x0a, 0x11, 0x72, 0x77, 0x5f, 0x69, 0x6f, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x72, 0x77, 0x49, 0x6f, + 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x72, 0x77, + 0x5f, 0x6d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x77, 0x4d, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2d, 0x0a, 0x13, 0x72, + 0x5f, 0x6d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x72, 0x4d, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2d, 0x0a, 0x13, 0x77, 0x5f, + 0x6d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x77, 0x4d, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, + 0x6f, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x61, 0x75, 0x74, 0x6f, 0x56, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x68, 0x6f, 0x73, + 0x74, 0x73, 0x12, 0x2d, 0x0a, 0x10, 0x6e, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, + 0x6e, 0x73, 0x53, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x88, 0x01, + 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x73, 0x68, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x18, 
0x11, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x11, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x75, 0x74, 0x6f, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x03, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x88, 0x01, 0x01, 0x42, 0x13, + 0x0a, 0x11, 0x5f, 0x6e, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, + 0x6e, 0x71, 0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x74, 0x72, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x22, 0xa3, 0x01, 0x0a, 0x0f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, + 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, + 0x2e, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x63, 0x6c, 0x69, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, + 0x3e, 0x0a, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x6f, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0xf2, 0x08, 0x0a, 0x17, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x6f, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6e, 0x71, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4e, 0x71, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x73, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6e, 0x73, 0x69, + 0x64, 0x12, 0x17, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x64, + 0x65, 0x76, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, + 0x64, 0x65, 0x76, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x5f, + 0x72, 0x61, 0x74, 
0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, + 0x52, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, + 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x70, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, + 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x75, 0x6d, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x70, + 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x70, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x75, 0x6e, + 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x6e, + 0x75, 0x6d, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x5f, 0x6f, 0x70, 0x73, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x4f, 0x70, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, + 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x72, 0x65, 0x61, + 0x64, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x33, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x6d, + 0x61, 0x78, 0x52, 0x65, 0x61, 0x64, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x69, 0x63, + 0x6b, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, + 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x13, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, + 0x6b, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x35, + 0x0a, 0x17, 0x6d, 0x69, 0x6e, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x14, 0x6d, 0x69, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x5f, 0x6c, + 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 
0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x11, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x6d, + 0x61, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x55, 0x6e, 0x6d, 0x61, 0x70, + 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x35, 0x0a, 0x17, + 0x6d, 0x69, 0x6e, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, + 0x69, 0x6e, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x69, + 0x63, 0x6b, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x10, 0x63, 0x6f, 0x70, 0x79, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x69, 0x63, 0x6b, + 0x73, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x6c, 0x61, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x70, 0x79, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73, + 0x18, 0x1a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x6d, 0x69, 0x6e, 0x43, 0x6f, 0x70, 0x79, 0x4c, + 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x08, 0x69, + 0x6f, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x07, 0x69, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x5f, + 0x75, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x12, 0x73, 0x70, 0x64, 0x6b, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xf7, 0x01, 0x0a, 0x22, 0x73, 0x70, 0x64, + 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x39, 0x0a, 0x0e, + 0x6e, 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x66, 0x6c, 0x61, 0x67, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x6e, 0x76, 0x6d, 0x66, 0x4c, + 0x6f, 0x67, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x09, 
0x6c, 0x6f, 0x67, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x4c, 0x6f, 0x67, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x31, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x69, 0x6e, 0x74, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x22, 0x7f, 0x0a, 0x16, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x09, 0x6c, 0x6f, 0x67, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x47, + 0x77, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x2a, 0x23, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, + 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, 0x10, 0x01, 0x2a, 0x43, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x4e, 0x4f, 0x54, 0x49, 0x43, 0x45, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, + 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x04, 0x2a, 0x53, 0x0a, + 0x0a, 0x47, 0x77, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0a, 0x0a, 0x06, 0x6e, + 0x6f, 0x74, 0x73, 0x65, 0x74, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x10, 0x0a, 0x12, 0x08, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x10, 0x14, 0x12, 0x0b, 0x0a, 0x07, + 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x10, 0x28, 0x12, 0x0c, 0x0a, 0x08, 0x63, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, + 0x10, 0x32, 0x2a, 0x4a, 0x0a, 0x09, 0x61, 0x6e, 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x50, + 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x4e, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, + 0x49, 0x4e, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x49, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x32, 0x89, + 0x11, 0x0a, 0x07, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x33, 0x0a, 0x0d, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x12, 0x12, 0x2e, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x1a, + 0x0c, 0x2e, 0x6e, 0x73, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, + 0x3b, 0x0a, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x12, 0x15, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 
0x62, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0e, 0x2e, 0x73, 0x75, 0x62, + 0x73, 0x79, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x10, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x12, 0x15, 0x2e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x14, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x12, 0x19, + 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x6c, 0x69, 0x73, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x14, 0x2e, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, + 0x71, 0x1a, 0x10, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x10, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x72, 0x65, 0x71, + 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, + 0x51, 0x0a, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x74, + 0x5f, 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x22, 0x00, 0x12, 0x41, 0x0a, 0x18, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x5f, 0x71, 0x6f, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x16, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x71, + 0x6f, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x25, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x2a, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x1b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x76, 0x69, + 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x20, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x76, 0x69, 0x73, 0x69, + 0x62, 0x69, 0x6c, 
0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x1d, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x62, 0x64, 0x5f, 0x74, + 0x72, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x22, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x62, 0x64, 0x5f, 0x74, + 0x72, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, + 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x4a, 0x0a, + 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x10, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x17, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, + 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0x00, 0x12, 0x42, 0x0a, 0x15, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1a, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x28, 0x0a, 0x08, 0x61, 0x64, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x12, 0x0d, 0x2e, 0x61, 0x64, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, + 0x2e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, + 0x36, 0x0a, 0x0f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x2c, 0x0a, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x0f, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x12, 0x15, 0x2e, 0x6c, 0x69, 0x73, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x71, + 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x14, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, + 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x36, 0x0a, + 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x14, 0x2e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x0e, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x13, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, + 0x3f, 0x0a, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x14, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x5f, 0x63, 0x6c, 0x69, 0x22, 0x00, + 0x12, 0x39, 0x0a, 0x0e, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x13, 0x2e, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x29, 0x0a, 0x0d, 0x73, + 0x65, 0x74, 0x5f, 0x61, 0x6e, 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x09, 0x2e, 0x61, + 0x6e, 0x61, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x21, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x70, + 0x64, 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, + 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x26, 0x2e, 0x67, 0x65, + 0x74, 0x5f, 0x73, 0x70, 0x64, 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x66, 0x6c, 0x61, 0x67, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x1a, 0x23, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x16, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x70, 0x64, 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, + 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x1b, 0x2e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, + 0x70, 0x64, 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x72, 0x65, + 0x71, 0x1a, 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, + 0x12, 0x3c, 0x0a, 0x12, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x70, 0x64, 
0x6b, 0x5f, 0x6e, 0x76, 0x6d, + 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x17, 0x2e, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x70, 0x64, + 0x6b, 0x5f, 0x6e, 0x76, 0x6d, 0x66, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x1a, + 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x3a, + 0x0a, 0x10, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x15, 0x2e, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x5f, 0x72, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x15, 0x67, 0x65, + 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x1a, 0x2e, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x1a, + 0x17, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x15, 0x73, 0x65, + 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x1a, 0x2e, 0x73, 0x65, 0x74, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x1a, + 0x0b, 0x2e, 0x72, 0x65, 0x71, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x5a, + 0x0a, 0x1b, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, + 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x5f, 0x72, 0x65, 0x71, 0x1a, + 0x17, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x70, 0x68, 0x2f, 0x63, 0x65, + 0x70, 0x68, 0x2d, 0x6e, 0x76, 0x6d, 0x65, 0x6f, 0x66, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x67, 0x6f, + 0x2f, 0x6e, 0x76, 0x6d, 0x65, 0x6f, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_gateway_proto_rawDescOnce sync.Once + file_gateway_proto_rawDescData = file_gateway_proto_rawDesc +) + +func file_gateway_proto_rawDescGZIP() []byte { + file_gateway_proto_rawDescOnce.Do(func() { + file_gateway_proto_rawDescData = protoimpl.X.CompressGZIP(file_gateway_proto_rawDescData) + }) + return file_gateway_proto_rawDescData +} + +var file_gateway_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_gateway_proto_msgTypes = make([]protoimpl.MessageInfo, 62) +var file_gateway_proto_goTypes = []interface{}{ + (AddressFamily)(0), // 0: AddressFamily + (LogLevel)(0), // 1: LogLevel + (GwLogLevel)(0), // 2: GwLogLevel + (AnaState)(0), // 3: ana_state + (*NamespaceAddReq)(nil), // 4: namespace_add_req + (*NamespaceResizeReq)(nil), // 5: namespace_resize_req + (*NamespaceGetIoStatsReq)(nil), // 6: namespace_get_io_stats_req + (*NamespaceSetQosReq)(nil), // 7: namespace_set_qos_req + (*NamespaceChangeLoadBalancingGroupReq)(nil), // 8: namespace_change_load_balancing_group_req + (*NamespaceChangeVisibilityReq)(nil), // 9: 
namespace_change_visibility_req + (*NamespaceSetRbdTrashImageReq)(nil), // 10: namespace_set_rbd_trash_image_req + (*NamespaceSetAutoResizeReq)(nil), // 11: namespace_set_auto_resize_req + (*NamespaceDeleteReq)(nil), // 12: namespace_delete_req + (*NamespaceAddHostReq)(nil), // 13: namespace_add_host_req + (*NamespaceDeleteHostReq)(nil), // 14: namespace_delete_host_req + (*CreateSubsystemReq)(nil), // 15: create_subsystem_req + (*DeleteSubsystemReq)(nil), // 16: delete_subsystem_req + (*ChangeSubsystemKeyReq)(nil), // 17: change_subsystem_key_req + (*ListNamespacesReq)(nil), // 18: list_namespaces_req + (*AddHostReq)(nil), // 19: add_host_req + (*ChangeHostKeyReq)(nil), // 20: change_host_key_req + (*RemoveHostReq)(nil), // 21: remove_host_req + (*ListHostsReq)(nil), // 22: list_hosts_req + (*ListConnectionsReq)(nil), // 23: list_connections_req + (*CreateListenerReq)(nil), // 24: create_listener_req + (*DeleteListenerReq)(nil), // 25: delete_listener_req + (*ListListenersReq)(nil), // 26: list_listeners_req + (*ListSubsystemsReq)(nil), // 27: list_subsystems_req + (*GetSubsystemsReq)(nil), // 28: get_subsystems_req + (*GetSpdkNvmfLogFlagsAndLevelReq)(nil), // 29: get_spdk_nvmf_log_flags_and_level_req + (*DisableSpdkNvmfLogsReq)(nil), // 30: disable_spdk_nvmf_logs_req + (*SetSpdkNvmfLogsReq)(nil), // 31: set_spdk_nvmf_logs_req + (*GetGatewayInfoReq)(nil), // 32: get_gateway_info_req + (*GetGatewayLogLevelReq)(nil), // 33: get_gateway_log_level_req + (*SetGatewayLogLevelReq)(nil), // 34: set_gateway_log_level_req + (*ShowGatewayListenersInfoReq)(nil), // 35: show_gateway_listeners_info_req + (*AnaGroupState)(nil), // 36: ana_group_state + (*NqnAnaStates)(nil), // 37: nqn_ana_states + (*AnaInfo)(nil), // 38: ana_info + (*ReqStatus)(nil), // 39: req_status + (*SubsysStatus)(nil), // 40: subsys_status + (*NsidStatus)(nil), // 41: nsid_status + (*SubsystemsInfo)(nil), // 42: subsystems_info + (*Subsystem)(nil), // 43: subsystem + (*ListenAddress)(nil), // 44: listen_address + (*Namespace)(nil), // 45: namespace + (*SubsystemsInfoCli)(nil), // 46: subsystems_info_cli + (*SubsystemCli)(nil), // 47: subsystem_cli + (*GatewayInfo)(nil), // 48: gateway_info + (*CliVersion)(nil), // 49: cli_version + (*GwVersion)(nil), // 50: gw_version + (*ListenerInfo)(nil), // 51: listener_info + (*ListenersInfo)(nil), // 52: listeners_info + (*GatewayListenerInfo)(nil), // 53: gateway_listener_info + (*GatewayListenersInfo)(nil), // 54: gateway_listeners_info + (*Host)(nil), // 55: host + (*HostsInfo)(nil), // 56: hosts_info + (*Connection)(nil), // 57: connection + (*ConnectionsInfo)(nil), // 58: connections_info + (*NamespaceCli)(nil), // 59: namespace_cli + (*NamespacesInfo)(nil), // 60: namespaces_info + (*NamespaceIoError)(nil), // 61: namespace_io_error + (*NamespaceIoStatsInfo)(nil), // 62: namespace_io_stats_info + (*SpdkLogFlagInfo)(nil), // 63: spdk_log_flag_info + (*SpdkNvmfLogFlagsAndLevelInfo)(nil), // 64: spdk_nvmf_log_flags_and_level_info + (*GatewayLogLevelInfo)(nil), // 65: gateway_log_level_info +} +var file_gateway_proto_depIdxs = []int32{ + 0, // 0: create_listener_req.adrfam:type_name -> AddressFamily + 0, // 1: delete_listener_req.adrfam:type_name -> AddressFamily + 1, // 2: set_spdk_nvmf_logs_req.log_level:type_name -> LogLevel + 1, // 3: set_spdk_nvmf_logs_req.print_level:type_name -> LogLevel + 2, // 4: set_gateway_log_level_req.log_level:type_name -> GwLogLevel + 3, // 5: ana_group_state.state:type_name -> ana_state + 36, // 6: nqn_ana_states.states:type_name -> ana_group_state + 37, // 
7: ana_info.states:type_name -> nqn_ana_states + 43, // 8: subsystems_info.subsystems:type_name -> subsystem + 44, // 9: subsystem.listen_addresses:type_name -> listen_address + 55, // 10: subsystem.hosts:type_name -> host + 45, // 11: subsystem.namespaces:type_name -> namespace + 47, // 12: subsystems_info_cli.subsystems:type_name -> subsystem_cli + 0, // 13: listener_info.adrfam:type_name -> AddressFamily + 51, // 14: listeners_info.listeners:type_name -> listener_info + 51, // 15: gateway_listener_info.listener:type_name -> listener_info + 36, // 16: gateway_listener_info.lb_states:type_name -> ana_group_state + 53, // 17: gateway_listeners_info.gw_listeners:type_name -> gateway_listener_info + 55, // 18: hosts_info.hosts:type_name -> host + 0, // 19: connection.adrfam:type_name -> AddressFamily + 57, // 20: connections_info.connections:type_name -> connection + 59, // 21: namespaces_info.namespaces:type_name -> namespace_cli + 61, // 22: namespace_io_stats_info.io_error:type_name -> namespace_io_error + 63, // 23: spdk_nvmf_log_flags_and_level_info.nvmf_log_flags:type_name -> spdk_log_flag_info + 1, // 24: spdk_nvmf_log_flags_and_level_info.log_level:type_name -> LogLevel + 1, // 25: spdk_nvmf_log_flags_and_level_info.log_print_level:type_name -> LogLevel + 2, // 26: gateway_log_level_info.log_level:type_name -> GwLogLevel + 4, // 27: Gateway.namespace_add:input_type -> namespace_add_req + 15, // 28: Gateway.create_subsystem:input_type -> create_subsystem_req + 16, // 29: Gateway.delete_subsystem:input_type -> delete_subsystem_req + 17, // 30: Gateway.change_subsystem_key:input_type -> change_subsystem_key_req + 18, // 31: Gateway.list_namespaces:input_type -> list_namespaces_req + 5, // 32: Gateway.namespace_resize:input_type -> namespace_resize_req + 6, // 33: Gateway.namespace_get_io_stats:input_type -> namespace_get_io_stats_req + 7, // 34: Gateway.namespace_set_qos_limits:input_type -> namespace_set_qos_req + 8, // 35: Gateway.namespace_change_load_balancing_group:input_type -> namespace_change_load_balancing_group_req + 9, // 36: Gateway.namespace_change_visibility:input_type -> namespace_change_visibility_req + 10, // 37: Gateway.namespace_set_rbd_trash_image:input_type -> namespace_set_rbd_trash_image_req + 11, // 38: Gateway.namespace_set_auto_resize:input_type -> namespace_set_auto_resize_req + 12, // 39: Gateway.namespace_delete:input_type -> namespace_delete_req + 13, // 40: Gateway.namespace_add_host:input_type -> namespace_add_host_req + 14, // 41: Gateway.namespace_delete_host:input_type -> namespace_delete_host_req + 19, // 42: Gateway.add_host:input_type -> add_host_req + 21, // 43: Gateway.remove_host:input_type -> remove_host_req + 20, // 44: Gateway.change_host_key:input_type -> change_host_key_req + 22, // 45: Gateway.list_hosts:input_type -> list_hosts_req + 23, // 46: Gateway.list_connections:input_type -> list_connections_req + 24, // 47: Gateway.create_listener:input_type -> create_listener_req + 25, // 48: Gateway.delete_listener:input_type -> delete_listener_req + 26, // 49: Gateway.list_listeners:input_type -> list_listeners_req + 27, // 50: Gateway.list_subsystems:input_type -> list_subsystems_req + 28, // 51: Gateway.get_subsystems:input_type -> get_subsystems_req + 38, // 52: Gateway.set_ana_state:input_type -> ana_info + 29, // 53: Gateway.get_spdk_nvmf_log_flags_and_level:input_type -> get_spdk_nvmf_log_flags_and_level_req + 30, // 54: Gateway.disable_spdk_nvmf_logs:input_type -> disable_spdk_nvmf_logs_req + 31, // 55: 
Gateway.set_spdk_nvmf_logs:input_type -> set_spdk_nvmf_logs_req + 32, // 56: Gateway.get_gateway_info:input_type -> get_gateway_info_req + 33, // 57: Gateway.get_gateway_log_level:input_type -> get_gateway_log_level_req + 34, // 58: Gateway.set_gateway_log_level:input_type -> set_gateway_log_level_req + 35, // 59: Gateway.show_gateway_listeners_info:input_type -> show_gateway_listeners_info_req + 41, // 60: Gateway.namespace_add:output_type -> nsid_status + 40, // 61: Gateway.create_subsystem:output_type -> subsys_status + 39, // 62: Gateway.delete_subsystem:output_type -> req_status + 39, // 63: Gateway.change_subsystem_key:output_type -> req_status + 60, // 64: Gateway.list_namespaces:output_type -> namespaces_info + 39, // 65: Gateway.namespace_resize:output_type -> req_status + 62, // 66: Gateway.namespace_get_io_stats:output_type -> namespace_io_stats_info + 39, // 67: Gateway.namespace_set_qos_limits:output_type -> req_status + 39, // 68: Gateway.namespace_change_load_balancing_group:output_type -> req_status + 39, // 69: Gateway.namespace_change_visibility:output_type -> req_status + 39, // 70: Gateway.namespace_set_rbd_trash_image:output_type -> req_status + 39, // 71: Gateway.namespace_set_auto_resize:output_type -> req_status + 39, // 72: Gateway.namespace_delete:output_type -> req_status + 39, // 73: Gateway.namespace_add_host:output_type -> req_status + 39, // 74: Gateway.namespace_delete_host:output_type -> req_status + 39, // 75: Gateway.add_host:output_type -> req_status + 39, // 76: Gateway.remove_host:output_type -> req_status + 39, // 77: Gateway.change_host_key:output_type -> req_status + 56, // 78: Gateway.list_hosts:output_type -> hosts_info + 58, // 79: Gateway.list_connections:output_type -> connections_info + 39, // 80: Gateway.create_listener:output_type -> req_status + 39, // 81: Gateway.delete_listener:output_type -> req_status + 52, // 82: Gateway.list_listeners:output_type -> listeners_info + 46, // 83: Gateway.list_subsystems:output_type -> subsystems_info_cli + 42, // 84: Gateway.get_subsystems:output_type -> subsystems_info + 39, // 85: Gateway.set_ana_state:output_type -> req_status + 64, // 86: Gateway.get_spdk_nvmf_log_flags_and_level:output_type -> spdk_nvmf_log_flags_and_level_info + 39, // 87: Gateway.disable_spdk_nvmf_logs:output_type -> req_status + 39, // 88: Gateway.set_spdk_nvmf_logs:output_type -> req_status + 48, // 89: Gateway.get_gateway_info:output_type -> gateway_info + 65, // 90: Gateway.get_gateway_log_level:output_type -> gateway_log_level_info + 39, // 91: Gateway.set_gateway_log_level:output_type -> req_status + 54, // 92: Gateway.show_gateway_listeners_info:output_type -> gateway_listeners_info + 60, // [60:93] is the sub-list for method output_type + 27, // [27:60] is the sub-list for method input_type + 27, // [27:27] is the sub-list for extension type_name + 27, // [27:27] is the sub-list for extension extendee + 0, // [0:27] is the sub-list for field type_name +} + +func init() { file_gateway_proto_init() } +func file_gateway_proto_init() { + if File_gateway_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_gateway_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceAddReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceResizeReq); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceGetIoStatsReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceSetQosReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceChangeLoadBalancingGroupReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceChangeVisibilityReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceSetRbdTrashImageReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceSetAutoResizeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceDeleteReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceAddHostReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceDeleteHostReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSubsystemReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSubsystemReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangeSubsystemKeyReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNamespacesReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[15].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*AddHostReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangeHostKeyReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveHostReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListHostsReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListConnectionsReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateListenerReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteListenerReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListListenersReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSubsystemsReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubsystemsReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSpdkNvmfLogFlagsAndLevelReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DisableSpdkNvmfLogsReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetSpdkNvmfLogsReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetGatewayInfoReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_gateway_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetGatewayLogLevelReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetGatewayLogLevelReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowGatewayListenersInfoReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnaGroupState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NqnAnaStates); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnaInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReqStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubsysStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NsidStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubsystemsInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Subsystem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Namespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubsystemsInfoCli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_gateway_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubsystemCli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GatewayInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CliVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GwVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GatewayListenerInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GatewayListenersInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Host); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HostsInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Connection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionsInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceCli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespacesInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_gateway_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceIoError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceIoStatsInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpdkLogFlagInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpdkNvmfLogFlagsAndLevelInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateway_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GatewayLogLevelInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_gateway_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[3].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[5].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[8].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[9].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[11].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[14].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[18].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[20].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[21].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[23].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[25].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[27].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[28].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[39].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[40].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[41].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[43].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[44].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[47].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[51].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[53].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[55].OneofWrappers = []interface{}{} + file_gateway_proto_msgTypes[58].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_gateway_proto_rawDesc, + NumEnums: 4, + NumMessages: 62, + 
NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_gateway_proto_goTypes, + DependencyIndexes: file_gateway_proto_depIdxs, + EnumInfos: file_gateway_proto_enumTypes, + MessageInfos: file_gateway_proto_msgTypes, + }.Build() + File_gateway_proto = out.File + file_gateway_proto_rawDesc = nil + file_gateway_proto_goTypes = nil + file_gateway_proto_depIdxs = nil +} diff --git a/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway_grpc.pb.go b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway_grpc.pb.go new file mode 100644 index 00000000000..6f80cc57aaf --- /dev/null +++ b/vendor/github.com/ceph/ceph-nvmeof/lib/go/nvmeof/gateway_grpc.pb.go @@ -0,0 +1,1412 @@ +// +// Copyright (c) 2021 International Business Machines +// All rights reserved. +// +// SPDX-License-Identifier: MIT +// +// Authors: anita.shekar@ibm.com, sandy.kaur@ibm.com +// + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.21.12 +// source: gateway.proto + +package nvmeof + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Gateway_NamespaceAdd_FullMethodName = "/Gateway/namespace_add" + Gateway_CreateSubsystem_FullMethodName = "/Gateway/create_subsystem" + Gateway_DeleteSubsystem_FullMethodName = "/Gateway/delete_subsystem" + Gateway_ChangeSubsystemKey_FullMethodName = "/Gateway/change_subsystem_key" + Gateway_ListNamespaces_FullMethodName = "/Gateway/list_namespaces" + Gateway_NamespaceResize_FullMethodName = "/Gateway/namespace_resize" + Gateway_NamespaceGetIoStats_FullMethodName = "/Gateway/namespace_get_io_stats" + Gateway_NamespaceSetQosLimits_FullMethodName = "/Gateway/namespace_set_qos_limits" + Gateway_NamespaceChangeLoadBalancingGroup_FullMethodName = "/Gateway/namespace_change_load_balancing_group" + Gateway_NamespaceChangeVisibility_FullMethodName = "/Gateway/namespace_change_visibility" + Gateway_NamespaceSetRbdTrashImage_FullMethodName = "/Gateway/namespace_set_rbd_trash_image" + Gateway_NamespaceSetAutoResize_FullMethodName = "/Gateway/namespace_set_auto_resize" + Gateway_NamespaceDelete_FullMethodName = "/Gateway/namespace_delete" + Gateway_NamespaceAddHost_FullMethodName = "/Gateway/namespace_add_host" + Gateway_NamespaceDeleteHost_FullMethodName = "/Gateway/namespace_delete_host" + Gateway_AddHost_FullMethodName = "/Gateway/add_host" + Gateway_RemoveHost_FullMethodName = "/Gateway/remove_host" + Gateway_ChangeHostKey_FullMethodName = "/Gateway/change_host_key" + Gateway_ListHosts_FullMethodName = "/Gateway/list_hosts" + Gateway_ListConnections_FullMethodName = "/Gateway/list_connections" + Gateway_CreateListener_FullMethodName = "/Gateway/create_listener" + Gateway_DeleteListener_FullMethodName = "/Gateway/delete_listener" + Gateway_ListListeners_FullMethodName = "/Gateway/list_listeners" + Gateway_ListSubsystems_FullMethodName = "/Gateway/list_subsystems" + Gateway_GetSubsystems_FullMethodName = "/Gateway/get_subsystems" + Gateway_SetAnaState_FullMethodName = "/Gateway/set_ana_state" + Gateway_GetSpdkNvmfLogFlagsAndLevel_FullMethodName = "/Gateway/get_spdk_nvmf_log_flags_and_level" + Gateway_DisableSpdkNvmfLogs_FullMethodName = "/Gateway/disable_spdk_nvmf_logs" + Gateway_SetSpdkNvmfLogs_FullMethodName = 
"/Gateway/set_spdk_nvmf_logs" + Gateway_GetGatewayInfo_FullMethodName = "/Gateway/get_gateway_info" + Gateway_GetGatewayLogLevel_FullMethodName = "/Gateway/get_gateway_log_level" + Gateway_SetGatewayLogLevel_FullMethodName = "/Gateway/set_gateway_log_level" + Gateway_ShowGatewayListenersInfo_FullMethodName = "/Gateway/show_gateway_listeners_info" +) + +// GatewayClient is the client API for Gateway service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GatewayClient interface { + // Creates a namespace from an RBD image + NamespaceAdd(ctx context.Context, in *NamespaceAddReq, opts ...grpc.CallOption) (*NsidStatus, error) + // Creates a subsystem + CreateSubsystem(ctx context.Context, in *CreateSubsystemReq, opts ...grpc.CallOption) (*SubsysStatus, error) + // Deletes a subsystem + DeleteSubsystem(ctx context.Context, in *DeleteSubsystemReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Changes subsystem key + ChangeSubsystemKey(ctx context.Context, in *ChangeSubsystemKeyReq, opts ...grpc.CallOption) (*ReqStatus, error) + // List namespaces + ListNamespaces(ctx context.Context, in *ListNamespacesReq, opts ...grpc.CallOption) (*NamespacesInfo, error) + // Resizes a namespace + NamespaceResize(ctx context.Context, in *NamespaceResizeReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Gets namespace's IO stats + NamespaceGetIoStats(ctx context.Context, in *NamespaceGetIoStatsReq, opts ...grpc.CallOption) (*NamespaceIoStatsInfo, error) + // Sets namespace's qos limits + NamespaceSetQosLimits(ctx context.Context, in *NamespaceSetQosReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Changes namespace's load balancing group + NamespaceChangeLoadBalancingGroup(ctx context.Context, in *NamespaceChangeLoadBalancingGroupReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Changes namespace's visibility + NamespaceChangeVisibility(ctx context.Context, in *NamespaceChangeVisibilityReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Set namespace's RBD trash image flag + NamespaceSetRbdTrashImage(ctx context.Context, in *NamespaceSetRbdTrashImageReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Set namespace's auto resize flag + NamespaceSetAutoResize(ctx context.Context, in *NamespaceSetAutoResizeReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Deletes a namespace + NamespaceDelete(ctx context.Context, in *NamespaceDeleteReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Adds a host to a namespace + NamespaceAddHost(ctx context.Context, in *NamespaceAddHostReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Deletes a host from a namespace + NamespaceDeleteHost(ctx context.Context, in *NamespaceDeleteHostReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Adds a host to a subsystem + AddHost(ctx context.Context, in *AddHostReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Removes a host from a subsystem + RemoveHost(ctx context.Context, in *RemoveHostReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Changes a host inband authentication keys + ChangeHostKey(ctx context.Context, in *ChangeHostKeyReq, opts ...grpc.CallOption) (*ReqStatus, error) + // List hosts + ListHosts(ctx context.Context, in *ListHostsReq, opts ...grpc.CallOption) (*HostsInfo, error) + // List connections + ListConnections(ctx context.Context, in *ListConnectionsReq, opts ...grpc.CallOption) (*ConnectionsInfo, error) + // Creates a listener for a subsystem at a given 
IP/Port + CreateListener(ctx context.Context, in *CreateListenerReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Deletes a listener from a subsystem at a given IP/Port + DeleteListener(ctx context.Context, in *DeleteListenerReq, opts ...grpc.CallOption) (*ReqStatus, error) + // List listeners + ListListeners(ctx context.Context, in *ListListenersReq, opts ...grpc.CallOption) (*ListenersInfo, error) + // List subsystems + ListSubsystems(ctx context.Context, in *ListSubsystemsReq, opts ...grpc.CallOption) (*SubsystemsInfoCli, error) + // Gets subsystems + GetSubsystems(ctx context.Context, in *GetSubsystemsReq, opts ...grpc.CallOption) (*SubsystemsInfo, error) + // Set gateway ANA states + SetAnaState(ctx context.Context, in *AnaInfo, opts ...grpc.CallOption) (*ReqStatus, error) + // Gets spdk nvmf log flags and level + GetSpdkNvmfLogFlagsAndLevel(ctx context.Context, in *GetSpdkNvmfLogFlagsAndLevelReq, opts ...grpc.CallOption) (*SpdkNvmfLogFlagsAndLevelInfo, error) + // Disables spdk nvmf logs + DisableSpdkNvmfLogs(ctx context.Context, in *DisableSpdkNvmfLogsReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Set spdk nvmf logs + SetSpdkNvmfLogs(ctx context.Context, in *SetSpdkNvmfLogsReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Get gateway info + GetGatewayInfo(ctx context.Context, in *GetGatewayInfoReq, opts ...grpc.CallOption) (*GatewayInfo, error) + // Get gateway log level + GetGatewayLogLevel(ctx context.Context, in *GetGatewayLogLevelReq, opts ...grpc.CallOption) (*GatewayLogLevelInfo, error) + // Set gateway log level + SetGatewayLogLevel(ctx context.Context, in *SetGatewayLogLevelReq, opts ...grpc.CallOption) (*ReqStatus, error) + // Show gateway listeners info + ShowGatewayListenersInfo(ctx context.Context, in *ShowGatewayListenersInfoReq, opts ...grpc.CallOption) (*GatewayListenersInfo, error) +} + +type gatewayClient struct { + cc grpc.ClientConnInterface +} + +func NewGatewayClient(cc grpc.ClientConnInterface) GatewayClient { + return &gatewayClient{cc} +} + +func (c *gatewayClient) NamespaceAdd(ctx context.Context, in *NamespaceAddReq, opts ...grpc.CallOption) (*NsidStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(NsidStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceAdd_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) CreateSubsystem(ctx context.Context, in *CreateSubsystemReq, opts ...grpc.CallOption) (*SubsysStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SubsysStatus) + err := c.cc.Invoke(ctx, Gateway_CreateSubsystem_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) DeleteSubsystem(ctx context.Context, in *DeleteSubsystemReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_DeleteSubsystem_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ChangeSubsystemKey(ctx context.Context, in *ChangeSubsystemKeyReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_ChangeSubsystemKey_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ListNamespaces(ctx context.Context, in *ListNamespacesReq, opts ...grpc.CallOption) (*NamespacesInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(NamespacesInfo) + err := c.cc.Invoke(ctx, Gateway_ListNamespaces_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceResize(ctx context.Context, in *NamespaceResizeReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceResize_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceGetIoStats(ctx context.Context, in *NamespaceGetIoStatsReq, opts ...grpc.CallOption) (*NamespaceIoStatsInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(NamespaceIoStatsInfo) + err := c.cc.Invoke(ctx, Gateway_NamespaceGetIoStats_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceSetQosLimits(ctx context.Context, in *NamespaceSetQosReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceSetQosLimits_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceChangeLoadBalancingGroup(ctx context.Context, in *NamespaceChangeLoadBalancingGroupReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceChangeLoadBalancingGroup_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceChangeVisibility(ctx context.Context, in *NamespaceChangeVisibilityReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceChangeVisibility_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceSetRbdTrashImage(ctx context.Context, in *NamespaceSetRbdTrashImageReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceSetRbdTrashImage_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceSetAutoResize(ctx context.Context, in *NamespaceSetAutoResizeReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceSetAutoResize_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceDelete(ctx context.Context, in *NamespaceDeleteReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceDelete_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceAddHost(ctx context.Context, in *NamespaceAddHostReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceAddHost_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) NamespaceDeleteHost(ctx context.Context, in *NamespaceDeleteHostReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_NamespaceDeleteHost_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) AddHost(ctx context.Context, in *AddHostReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_AddHost_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) RemoveHost(ctx context.Context, in *RemoveHostReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_RemoveHost_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ChangeHostKey(ctx context.Context, in *ChangeHostKeyReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_ChangeHostKey_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ListHosts(ctx context.Context, in *ListHostsReq, opts ...grpc.CallOption) (*HostsInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HostsInfo) + err := c.cc.Invoke(ctx, Gateway_ListHosts_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ListConnections(ctx context.Context, in *ListConnectionsReq, opts ...grpc.CallOption) (*ConnectionsInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ConnectionsInfo) + err := c.cc.Invoke(ctx, Gateway_ListConnections_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) CreateListener(ctx context.Context, in *CreateListenerReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_CreateListener_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) DeleteListener(ctx context.Context, in *DeleteListenerReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_DeleteListener_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ListListeners(ctx context.Context, in *ListListenersReq, opts ...grpc.CallOption) (*ListenersInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListenersInfo) + err := c.cc.Invoke(ctx, Gateway_ListListeners_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ListSubsystems(ctx context.Context, in *ListSubsystemsReq, opts ...grpc.CallOption) (*SubsystemsInfoCli, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SubsystemsInfoCli) + err := c.cc.Invoke(ctx, Gateway_ListSubsystems_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) GetSubsystems(ctx context.Context, in *GetSubsystemsReq, opts ...grpc.CallOption) (*SubsystemsInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SubsystemsInfo) + err := c.cc.Invoke(ctx, Gateway_GetSubsystems_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) SetAnaState(ctx context.Context, in *AnaInfo, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_SetAnaState_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) GetSpdkNvmfLogFlagsAndLevel(ctx context.Context, in *GetSpdkNvmfLogFlagsAndLevelReq, opts ...grpc.CallOption) (*SpdkNvmfLogFlagsAndLevelInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SpdkNvmfLogFlagsAndLevelInfo) + err := c.cc.Invoke(ctx, Gateway_GetSpdkNvmfLogFlagsAndLevel_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) DisableSpdkNvmfLogs(ctx context.Context, in *DisableSpdkNvmfLogsReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_DisableSpdkNvmfLogs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) SetSpdkNvmfLogs(ctx context.Context, in *SetSpdkNvmfLogsReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_SetSpdkNvmfLogs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) GetGatewayInfo(ctx context.Context, in *GetGatewayInfoReq, opts ...grpc.CallOption) (*GatewayInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GatewayInfo) + err := c.cc.Invoke(ctx, Gateway_GetGatewayInfo_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) GetGatewayLogLevel(ctx context.Context, in *GetGatewayLogLevelReq, opts ...grpc.CallOption) (*GatewayLogLevelInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GatewayLogLevelInfo) + err := c.cc.Invoke(ctx, Gateway_GetGatewayLogLevel_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) SetGatewayLogLevel(ctx context.Context, in *SetGatewayLogLevelReq, opts ...grpc.CallOption) (*ReqStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ReqStatus) + err := c.cc.Invoke(ctx, Gateway_SetGatewayLogLevel_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *gatewayClient) ShowGatewayListenersInfo(ctx context.Context, in *ShowGatewayListenersInfoReq, opts ...grpc.CallOption) (*GatewayListenersInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GatewayListenersInfo) + err := c.cc.Invoke(ctx, Gateway_ShowGatewayListenersInfo_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GatewayServer is the server API for Gateway service. +// All implementations must embed UnimplementedGatewayServer +// for forward compatibility. +type GatewayServer interface { + // Creates a namespace from an RBD image + NamespaceAdd(context.Context, *NamespaceAddReq) (*NsidStatus, error) + // Creates a subsystem + CreateSubsystem(context.Context, *CreateSubsystemReq) (*SubsysStatus, error) + // Deletes a subsystem + DeleteSubsystem(context.Context, *DeleteSubsystemReq) (*ReqStatus, error) + // Changes subsystem key + ChangeSubsystemKey(context.Context, *ChangeSubsystemKeyReq) (*ReqStatus, error) + // List namespaces + ListNamespaces(context.Context, *ListNamespacesReq) (*NamespacesInfo, error) + // Resizes a namespace + NamespaceResize(context.Context, *NamespaceResizeReq) (*ReqStatus, error) + // Gets namespace's IO stats + NamespaceGetIoStats(context.Context, *NamespaceGetIoStatsReq) (*NamespaceIoStatsInfo, error) + // Sets namespace's qos limits + NamespaceSetQosLimits(context.Context, *NamespaceSetQosReq) (*ReqStatus, error) + // Changes namespace's load balancing group + NamespaceChangeLoadBalancingGroup(context.Context, *NamespaceChangeLoadBalancingGroupReq) (*ReqStatus, error) + // Changes namespace's visibility + NamespaceChangeVisibility(context.Context, *NamespaceChangeVisibilityReq) (*ReqStatus, error) + // Set namespace's RBD trash image flag + NamespaceSetRbdTrashImage(context.Context, *NamespaceSetRbdTrashImageReq) (*ReqStatus, error) + // Set namespace's auto resize flag + NamespaceSetAutoResize(context.Context, *NamespaceSetAutoResizeReq) (*ReqStatus, error) + // Deletes a namespace + NamespaceDelete(context.Context, *NamespaceDeleteReq) (*ReqStatus, error) + // Adds a host to a namespace + NamespaceAddHost(context.Context, *NamespaceAddHostReq) (*ReqStatus, error) + // Deletes a host from a namespace + NamespaceDeleteHost(context.Context, *NamespaceDeleteHostReq) (*ReqStatus, error) + // Adds a host to a subsystem + AddHost(context.Context, *AddHostReq) (*ReqStatus, error) + // Removes a host from a subsystem + RemoveHost(context.Context, *RemoveHostReq) (*ReqStatus, error) + // Changes a host inband authentication keys + ChangeHostKey(context.Context, *ChangeHostKeyReq) (*ReqStatus, error) + // List hosts + ListHosts(context.Context, *ListHostsReq) (*HostsInfo, error) + // List connections + ListConnections(context.Context, *ListConnectionsReq) (*ConnectionsInfo, error) + // Creates a listener for a subsystem at a given IP/Port + CreateListener(context.Context, *CreateListenerReq) (*ReqStatus, error) + // Deletes a listener from a subsystem at a given IP/Port + DeleteListener(context.Context, *DeleteListenerReq) 
(*ReqStatus, error) + // List listeners + ListListeners(context.Context, *ListListenersReq) (*ListenersInfo, error) + // List subsystems + ListSubsystems(context.Context, *ListSubsystemsReq) (*SubsystemsInfoCli, error) + // Gets subsystems + GetSubsystems(context.Context, *GetSubsystemsReq) (*SubsystemsInfo, error) + // Set gateway ANA states + SetAnaState(context.Context, *AnaInfo) (*ReqStatus, error) + // Gets spdk nvmf log flags and level + GetSpdkNvmfLogFlagsAndLevel(context.Context, *GetSpdkNvmfLogFlagsAndLevelReq) (*SpdkNvmfLogFlagsAndLevelInfo, error) + // Disables spdk nvmf logs + DisableSpdkNvmfLogs(context.Context, *DisableSpdkNvmfLogsReq) (*ReqStatus, error) + // Set spdk nvmf logs + SetSpdkNvmfLogs(context.Context, *SetSpdkNvmfLogsReq) (*ReqStatus, error) + // Get gateway info + GetGatewayInfo(context.Context, *GetGatewayInfoReq) (*GatewayInfo, error) + // Get gateway log level + GetGatewayLogLevel(context.Context, *GetGatewayLogLevelReq) (*GatewayLogLevelInfo, error) + // Set gateway log level + SetGatewayLogLevel(context.Context, *SetGatewayLogLevelReq) (*ReqStatus, error) + // Show gateway listeners info + ShowGatewayListenersInfo(context.Context, *ShowGatewayListenersInfoReq) (*GatewayListenersInfo, error) + mustEmbedUnimplementedGatewayServer() +} + +// UnimplementedGatewayServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedGatewayServer struct{} + +func (UnimplementedGatewayServer) NamespaceAdd(context.Context, *NamespaceAddReq) (*NsidStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceAdd not implemented") +} +func (UnimplementedGatewayServer) CreateSubsystem(context.Context, *CreateSubsystemReq) (*SubsysStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSubsystem not implemented") +} +func (UnimplementedGatewayServer) DeleteSubsystem(context.Context, *DeleteSubsystemReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSubsystem not implemented") +} +func (UnimplementedGatewayServer) ChangeSubsystemKey(context.Context, *ChangeSubsystemKeyReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChangeSubsystemKey not implemented") +} +func (UnimplementedGatewayServer) ListNamespaces(context.Context, *ListNamespacesReq) (*NamespacesInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNamespaces not implemented") +} +func (UnimplementedGatewayServer) NamespaceResize(context.Context, *NamespaceResizeReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceResize not implemented") +} +func (UnimplementedGatewayServer) NamespaceGetIoStats(context.Context, *NamespaceGetIoStatsReq) (*NamespaceIoStatsInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceGetIoStats not implemented") +} +func (UnimplementedGatewayServer) NamespaceSetQosLimits(context.Context, *NamespaceSetQosReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceSetQosLimits not implemented") +} +func (UnimplementedGatewayServer) NamespaceChangeLoadBalancingGroup(context.Context, *NamespaceChangeLoadBalancingGroupReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceChangeLoadBalancingGroup not implemented") +} +func (UnimplementedGatewayServer) 
NamespaceChangeVisibility(context.Context, *NamespaceChangeVisibilityReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceChangeVisibility not implemented") +} +func (UnimplementedGatewayServer) NamespaceSetRbdTrashImage(context.Context, *NamespaceSetRbdTrashImageReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceSetRbdTrashImage not implemented") +} +func (UnimplementedGatewayServer) NamespaceSetAutoResize(context.Context, *NamespaceSetAutoResizeReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceSetAutoResize not implemented") +} +func (UnimplementedGatewayServer) NamespaceDelete(context.Context, *NamespaceDeleteReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceDelete not implemented") +} +func (UnimplementedGatewayServer) NamespaceAddHost(context.Context, *NamespaceAddHostReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceAddHost not implemented") +} +func (UnimplementedGatewayServer) NamespaceDeleteHost(context.Context, *NamespaceDeleteHostReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method NamespaceDeleteHost not implemented") +} +func (UnimplementedGatewayServer) AddHost(context.Context, *AddHostReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddHost not implemented") +} +func (UnimplementedGatewayServer) RemoveHost(context.Context, *RemoveHostReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveHost not implemented") +} +func (UnimplementedGatewayServer) ChangeHostKey(context.Context, *ChangeHostKeyReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChangeHostKey not implemented") +} +func (UnimplementedGatewayServer) ListHosts(context.Context, *ListHostsReq) (*HostsInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListHosts not implemented") +} +func (UnimplementedGatewayServer) ListConnections(context.Context, *ListConnectionsReq) (*ConnectionsInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListConnections not implemented") +} +func (UnimplementedGatewayServer) CreateListener(context.Context, *CreateListenerReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateListener not implemented") +} +func (UnimplementedGatewayServer) DeleteListener(context.Context, *DeleteListenerReq) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteListener not implemented") +} +func (UnimplementedGatewayServer) ListListeners(context.Context, *ListListenersReq) (*ListenersInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListListeners not implemented") +} +func (UnimplementedGatewayServer) ListSubsystems(context.Context, *ListSubsystemsReq) (*SubsystemsInfoCli, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSubsystems not implemented") +} +func (UnimplementedGatewayServer) GetSubsystems(context.Context, *GetSubsystemsReq) (*SubsystemsInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSubsystems not implemented") +} +func (UnimplementedGatewayServer) SetAnaState(context.Context, *AnaInfo) (*ReqStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetAnaState not implemented") +} +func (UnimplementedGatewayServer) GetSpdkNvmfLogFlagsAndLevel(context.Context, 
*GetSpdkNvmfLogFlagsAndLevelReq) (*SpdkNvmfLogFlagsAndLevelInfo, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetSpdkNvmfLogFlagsAndLevel not implemented")
+}
+func (UnimplementedGatewayServer) DisableSpdkNvmfLogs(context.Context, *DisableSpdkNvmfLogsReq) (*ReqStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DisableSpdkNvmfLogs not implemented")
+}
+func (UnimplementedGatewayServer) SetSpdkNvmfLogs(context.Context, *SetSpdkNvmfLogsReq) (*ReqStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SetSpdkNvmfLogs not implemented")
+}
+func (UnimplementedGatewayServer) GetGatewayInfo(context.Context, *GetGatewayInfoReq) (*GatewayInfo, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetGatewayInfo not implemented")
+}
+func (UnimplementedGatewayServer) GetGatewayLogLevel(context.Context, *GetGatewayLogLevelReq) (*GatewayLogLevelInfo, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetGatewayLogLevel not implemented")
+}
+func (UnimplementedGatewayServer) SetGatewayLogLevel(context.Context, *SetGatewayLogLevelReq) (*ReqStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SetGatewayLogLevel not implemented")
+}
+func (UnimplementedGatewayServer) ShowGatewayListenersInfo(context.Context, *ShowGatewayListenersInfoReq) (*GatewayListenersInfo, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ShowGatewayListenersInfo not implemented")
+}
+func (UnimplementedGatewayServer) mustEmbedUnimplementedGatewayServer() {}
+func (UnimplementedGatewayServer) testEmbeddedByValue() {}
+
+// UnsafeGatewayServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to GatewayServer will
+// result in compilation errors.
+type UnsafeGatewayServer interface {
+	mustEmbedUnimplementedGatewayServer()
+}
+
+func RegisterGatewayServer(s grpc.ServiceRegistrar, srv GatewayServer) {
+	// If the following call panics, it indicates UnimplementedGatewayServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
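+	// As an illustration, a typical implementation embeds the struct by
+	// value (the type name gatewaySrv here is hypothetical):
+	//
+	//	type gatewaySrv struct{ UnimplementedGatewayServer }
+	//
+	//	RegisterGatewayServer(grpcServer, &gatewaySrv{})
+	//
+	// Embedding *UnimplementedGatewayServer (left nil) is the mistake the
+	// check below detects.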
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Gateway_ServiceDesc, srv) +} + +func _Gateway_NamespaceAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceAddReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceAdd_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceAdd(ctx, req.(*NamespaceAddReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_CreateSubsystem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSubsystemReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).CreateSubsystem(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_CreateSubsystem_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).CreateSubsystem(ctx, req.(*CreateSubsystemReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_DeleteSubsystem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSubsystemReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).DeleteSubsystem(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_DeleteSubsystem_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).DeleteSubsystem(ctx, req.(*DeleteSubsystemReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ChangeSubsystemKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ChangeSubsystemKeyReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ChangeSubsystemKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ChangeSubsystemKey_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ChangeSubsystemKey(ctx, req.(*ChangeSubsystemKeyReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ListNamespaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNamespacesReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ListNamespaces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ListNamespaces_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ListNamespaces(ctx, req.(*ListNamespacesReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceResize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceResizeReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceResize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceResize_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceResize(ctx, req.(*NamespaceResizeReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceGetIoStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceGetIoStatsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceGetIoStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceGetIoStats_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceGetIoStats(ctx, req.(*NamespaceGetIoStatsReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceSetQosLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceSetQosReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceSetQosLimits(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceSetQosLimits_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceSetQosLimits(ctx, req.(*NamespaceSetQosReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceChangeLoadBalancingGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceChangeLoadBalancingGroupReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceChangeLoadBalancingGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceChangeLoadBalancingGroup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceChangeLoadBalancingGroup(ctx, req.(*NamespaceChangeLoadBalancingGroupReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceChangeVisibility_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceChangeVisibilityReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceChangeVisibility(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceChangeVisibility_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceChangeVisibility(ctx, req.(*NamespaceChangeVisibilityReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceSetRbdTrashImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(NamespaceSetRbdTrashImageReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceSetRbdTrashImage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceSetRbdTrashImage_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceSetRbdTrashImage(ctx, req.(*NamespaceSetRbdTrashImageReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceSetAutoResize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceSetAutoResizeReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceSetAutoResize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceSetAutoResize_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceSetAutoResize(ctx, req.(*NamespaceSetAutoResizeReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceDeleteReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceDelete_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceDelete(ctx, req.(*NamespaceDeleteReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceAddHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceAddHostReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceAddHost(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceAddHost_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceAddHost(ctx, req.(*NamespaceAddHostReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_NamespaceDeleteHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NamespaceDeleteHostReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).NamespaceDeleteHost(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_NamespaceDeleteHost_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).NamespaceDeleteHost(ctx, req.(*NamespaceDeleteHostReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_AddHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddHostReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(GatewayServer).AddHost(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_AddHost_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).AddHost(ctx, req.(*AddHostReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_RemoveHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveHostReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).RemoveHost(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_RemoveHost_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).RemoveHost(ctx, req.(*RemoveHostReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ChangeHostKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ChangeHostKeyReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ChangeHostKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ChangeHostKey_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ChangeHostKey(ctx, req.(*ChangeHostKeyReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ListHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListHostsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ListHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ListHosts_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ListHosts(ctx, req.(*ListHostsReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ListConnections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListConnectionsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ListConnections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ListConnections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ListConnections(ctx, req.(*ListConnectionsReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_CreateListener_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateListenerReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).CreateListener(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_CreateListener_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).CreateListener(ctx, req.(*CreateListenerReq)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Gateway_DeleteListener_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteListenerReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).DeleteListener(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_DeleteListener_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).DeleteListener(ctx, req.(*DeleteListenerReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ListListeners_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListListenersReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ListListeners(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ListListeners_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ListListeners(ctx, req.(*ListListenersReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ListSubsystems_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSubsystemsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ListSubsystems(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ListSubsystems_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ListSubsystems(ctx, req.(*ListSubsystemsReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_GetSubsystems_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubsystemsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).GetSubsystems(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_GetSubsystems_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).GetSubsystems(ctx, req.(*GetSubsystemsReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_SetAnaState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnaInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).SetAnaState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_SetAnaState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).SetAnaState(ctx, req.(*AnaInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_GetSpdkNvmfLogFlagsAndLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSpdkNvmfLogFlagsAndLevelReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(GatewayServer).GetSpdkNvmfLogFlagsAndLevel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_GetSpdkNvmfLogFlagsAndLevel_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).GetSpdkNvmfLogFlagsAndLevel(ctx, req.(*GetSpdkNvmfLogFlagsAndLevelReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_DisableSpdkNvmfLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DisableSpdkNvmfLogsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).DisableSpdkNvmfLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_DisableSpdkNvmfLogs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).DisableSpdkNvmfLogs(ctx, req.(*DisableSpdkNvmfLogsReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_SetSpdkNvmfLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetSpdkNvmfLogsReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).SetSpdkNvmfLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_SetSpdkNvmfLogs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).SetSpdkNvmfLogs(ctx, req.(*SetSpdkNvmfLogsReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_GetGatewayInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGatewayInfoReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).GetGatewayInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_GetGatewayInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).GetGatewayInfo(ctx, req.(*GetGatewayInfoReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_GetGatewayLogLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGatewayLogLevelReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).GetGatewayLogLevel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_GetGatewayLogLevel_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).GetGatewayLogLevel(ctx, req.(*GetGatewayLogLevelReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_SetGatewayLogLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetGatewayLogLevelReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).SetGatewayLogLevel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_SetGatewayLogLevel_FullMethodName, + } + handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).SetGatewayLogLevel(ctx, req.(*SetGatewayLogLevelReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Gateway_ShowGatewayListenersInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShowGatewayListenersInfoReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GatewayServer).ShowGatewayListenersInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Gateway_ShowGatewayListenersInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GatewayServer).ShowGatewayListenersInfo(ctx, req.(*ShowGatewayListenersInfoReq)) + } + return interceptor(ctx, in, info, handler) +} + +// Gateway_ServiceDesc is the grpc.ServiceDesc for Gateway service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Gateway_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "Gateway", + HandlerType: (*GatewayServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "namespace_add", + Handler: _Gateway_NamespaceAdd_Handler, + }, + { + MethodName: "create_subsystem", + Handler: _Gateway_CreateSubsystem_Handler, + }, + { + MethodName: "delete_subsystem", + Handler: _Gateway_DeleteSubsystem_Handler, + }, + { + MethodName: "change_subsystem_key", + Handler: _Gateway_ChangeSubsystemKey_Handler, + }, + { + MethodName: "list_namespaces", + Handler: _Gateway_ListNamespaces_Handler, + }, + { + MethodName: "namespace_resize", + Handler: _Gateway_NamespaceResize_Handler, + }, + { + MethodName: "namespace_get_io_stats", + Handler: _Gateway_NamespaceGetIoStats_Handler, + }, + { + MethodName: "namespace_set_qos_limits", + Handler: _Gateway_NamespaceSetQosLimits_Handler, + }, + { + MethodName: "namespace_change_load_balancing_group", + Handler: _Gateway_NamespaceChangeLoadBalancingGroup_Handler, + }, + { + MethodName: "namespace_change_visibility", + Handler: _Gateway_NamespaceChangeVisibility_Handler, + }, + { + MethodName: "namespace_set_rbd_trash_image", + Handler: _Gateway_NamespaceSetRbdTrashImage_Handler, + }, + { + MethodName: "namespace_set_auto_resize", + Handler: _Gateway_NamespaceSetAutoResize_Handler, + }, + { + MethodName: "namespace_delete", + Handler: _Gateway_NamespaceDelete_Handler, + }, + { + MethodName: "namespace_add_host", + Handler: _Gateway_NamespaceAddHost_Handler, + }, + { + MethodName: "namespace_delete_host", + Handler: _Gateway_NamespaceDeleteHost_Handler, + }, + { + MethodName: "add_host", + Handler: _Gateway_AddHost_Handler, + }, + { + MethodName: "remove_host", + Handler: _Gateway_RemoveHost_Handler, + }, + { + MethodName: "change_host_key", + Handler: _Gateway_ChangeHostKey_Handler, + }, + { + MethodName: "list_hosts", + Handler: _Gateway_ListHosts_Handler, + }, + { + MethodName: "list_connections", + Handler: _Gateway_ListConnections_Handler, + }, + { + MethodName: "create_listener", + Handler: _Gateway_CreateListener_Handler, + }, + { + MethodName: "delete_listener", + Handler: _Gateway_DeleteListener_Handler, + }, + { + MethodName: "list_listeners", + Handler: _Gateway_ListListeners_Handler, + }, + { + MethodName: "list_subsystems", + Handler: _Gateway_ListSubsystems_Handler, + }, + { + MethodName: "get_subsystems", + Handler: _Gateway_GetSubsystems_Handler, + }, + { + MethodName: 
"set_ana_state", + Handler: _Gateway_SetAnaState_Handler, + }, + { + MethodName: "get_spdk_nvmf_log_flags_and_level", + Handler: _Gateway_GetSpdkNvmfLogFlagsAndLevel_Handler, + }, + { + MethodName: "disable_spdk_nvmf_logs", + Handler: _Gateway_DisableSpdkNvmfLogs_Handler, + }, + { + MethodName: "set_spdk_nvmf_logs", + Handler: _Gateway_SetSpdkNvmfLogs_Handler, + }, + { + MethodName: "get_gateway_info", + Handler: _Gateway_GetGatewayInfo_Handler, + }, + { + MethodName: "get_gateway_log_level", + Handler: _Gateway_GetGatewayLogLevel_Handler, + }, + { + MethodName: "set_gateway_log_level", + Handler: _Gateway_SetGatewayLogLevel_Handler, + }, + { + MethodName: "show_gateway_listeners_info", + Handler: _Gateway_ShowGatewayListenersInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "gateway.proto", +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4c65a927c7f..3379c9c6744 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -196,6 +196,9 @@ github.com/ceph/ceph-csi/api/deploy/kubernetes/cephfs github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd github.com/ceph/ceph-csi/api/deploy/ocp +# github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423 +## explicit; go 1.24.4 +github.com/ceph/ceph-nvmeof/lib/go/nvmeof # github.com/ceph/go-ceph v0.34.0 ## explicit; go 1.23.0 github.com/ceph/go-ceph/cephfs From d7dbe63ccbfd072d9f36304fbc7e4d98cb68a02e Mon Sep 17 00:00:00 2001 From: Rakshith R Date: Mon, 18 Aug 2025 19:15:31 +0530 Subject: [PATCH 005/157] ci: upgrade from v3.14.2 for upgrade tests Signed-off-by: Rakshith R --- build.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.env b/build.env index 5fdb38fa0b3..d36f2c4e8d7 100644 --- a/build.env +++ b/build.env @@ -12,7 +12,7 @@ CSI_IMAGE_VERSION=canary # cephcsi upgrade version -CSI_UPGRADE_VERSION=v3.13.1 +CSI_UPGRADE_VERSION=v3.14.2 # ceph-csi-operator version used for e2e test # TODO: use release tag which includes the bug fixes From dc1f3d7c8fc46e66387cf3281654211719e34e9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 07:46:18 +0000 Subject: [PATCH 006/157] rebase: bump k8s.io/kubernetes in the k8s-dependencies group Bumps the k8s-dependencies group with 1 update: [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes). Updates `k8s.io/kubernetes` from 1.33.3 to 1.33.4 - [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.33.3...v1.33.4) --- updated-dependencies: - dependency-name: k8s.io/kubernetes dependency-version: 1.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-dependencies ... 
Signed-off-by: dependabot[bot] 
---
 go.mod | 2 +-
 go.sum | 4 ++--
 vendor/modules.txt | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/go.mod b/go.mod
index 042907acd0e..057c15d6a9d 100644
--- a/go.mod
+++ b/go.mod
@@ -38,7 +38,7 @@ require (
 	k8s.io/apimachinery v0.33.3
 	k8s.io/cloud-provider v0.33.0
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/kubernetes v1.33.3
+	k8s.io/kubernetes v1.33.4
 	k8s.io/mount-utils v0.33.3
 	k8s.io/utils v0.0.0-20241210054802-24370beab758
 )
diff --git a/go.sum b/go.sum
index a2a49178de1..900f0c9eb6e 100644
--- a/go.sum
+++ b/go.sum
@@ -1385,8 +1385,8 @@ k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
-k8s.io/kubernetes v1.33.3 h1:dBx5Z2ZhR8kNzAwCoCz4j1niUbUrNUDVxeSj4/Ienu0=
-k8s.io/kubernetes v1.33.3/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00=
+k8s.io/kubernetes v1.33.4 h1:T1d5FLUYm3/KyUeV7YJhKTR980zHCHb7K2xhCSo3lE8=
+k8s.io/kubernetes v1.33.4/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00=
 k8s.io/mount-utils v0.33.3 h1:Q1jsnqdS4LdtJSYSXgiQv/XNrRHQncLk3gMYjKNSZrE=
 k8s.io/mount-utils v0.33.3/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE=
 k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3379c9c6744..0f6cceb0d39 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1148,7 +1148,7 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/spec3
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/kubernetes v1.33.3
+# k8s.io/kubernetes v1.33.4
 ## explicit; go 1.24.0
 k8s.io/kubernetes/pkg/features
 k8s.io/kubernetes/pkg/kubelet/events

From 913f5c597bce99f919e68dc420057167ca5a7c52 Mon Sep 17 00:00:00 2001
From: Praveen M 
Date: Thu, 14 Aug 2025 13:09:10 +0530
Subject: [PATCH 007/157] util: always advertise Controller PUBLISH_UNPUBLISH cap

This commit changes the driver to always advertise the Controller
PUBLISH_UNPUBLISH capability to avoid stale VolumeAttachments.

Previously, the Controller PUBLISH_UNPUBLISH capability was only
advertised when the 'enable-fencing' flag was set. In this setup,
the external-attacher would add a finalizer to the VolumeAttachment.
If the user later disabled fencing by setting enable-fencing to false,
the capability would no longer be advertised, causing the
external-attacher to skip finalizer removal. This left the
VolumeAttachment stuck in the deletion phase.
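A minimal sketch of the failure mode, assuming only the generated CSI Go
client (the helper below is hypothetical and not part of this patch): an
external-attacher-style sidecar gates its attach/detach reconciliation,
including VolumeAttachment finalizer removal, on a capability probe like
this one, so a capability that is withdrawn after finalizers were added
leaves them behind.

package capcheck

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// supportsPublishUnpublish reports whether the controller plugin currently
// advertises PUBLISH_UNPUBLISH_VOLUME. A hypothetical probe, mirroring how
// an attacher sidecar decides to run its (un)publish and finalizer logic.
func supportsPublishUnpublish(ctx context.Context, client csi.ControllerClient) (bool, error) {
	resp, err := client.ControllerGetCapabilities(ctx, &csi.ControllerGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME {
			return true, nil
		}
	}

	return false, nil
}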
Signed-off-by: Praveen M --- internal/cephfs/driver.go | 14 +++----------- internal/rbd/driver/driver.go | 13 +++---------- 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/internal/cephfs/driver.go b/internal/cephfs/driver.go index 8614f4592c2..808ef11da94 100644 --- a/internal/cephfs/driver.go +++ b/internal/cephfs/driver.go @@ -134,22 +134,14 @@ func (fs *Driver) Run(conf *util.Config) { } if conf.IsControllerServer || !conf.IsNodeServer { - controllerServiceCapabilities := []csi.ControllerServiceCapability_RPC_Type{ + fs.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, csi.ControllerServiceCapability_RPC_CLONE_VOLUME, csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER, - } - // if fencing is enabled, we can add the PUBLISH_UNPUBLISH_VOLUME capability. - if fs.cd.IsFencingEnabled() { - controllerServiceCapabilities = append( - controllerServiceCapabilities, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, - ) - } - - fs.cd.AddControllerServiceCapabilities(controllerServiceCapabilities) + csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }) fs.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{ csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, diff --git a/internal/rbd/driver/driver.go b/internal/rbd/driver/driver.go index 9e0f1fc11a3..bf40362eb5f 100644 --- a/internal/rbd/driver/driver.go +++ b/internal/rbd/driver/driver.go @@ -110,20 +110,13 @@ func (r *Driver) Run(conf *util.Config) { log.FatalLogMsg("Failed to initialize CSI Driver.") } if conf.IsControllerServer || !conf.IsNodeServer { - controllerServiceCapabilities := []csi.ControllerServiceCapability_RPC_Type{ + r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, csi.ControllerServiceCapability_RPC_CLONE_VOLUME, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, - } - // if fencing is enabled, we can add the PUBLISH_UNPUBLISH_VOLUME capability. 
- if r.cd.IsFencingEnabled() { - controllerServiceCapabilities = append( - controllerServiceCapabilities, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, - ) - } - r.cd.AddControllerServiceCapabilities(controllerServiceCapabilities) + csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }) // We only support the multi-writer option when using block, but it's a supported capability for the plugin in // general From 3e256d86d1d46a8ebf3876139b07c3e7a5acf11f Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 24 Jul 2025 14:21:00 +0530 Subject: [PATCH 008/157] cleanup: avoid duplication for GetControllerPublishSecret Signed-off-by: Praveen M --- internal/cephfs/controllerserver.go | 176 +++++++++++++++------------- internal/rbd/controllerserver.go | 150 +++++++++++++----------- internal/util/util.go | 36 +++--- 3 files changed, 194 insertions(+), 168 deletions(-) diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go index 97bc4ec7ad7..3827b4f0537 100644 --- a/internal/cephfs/controllerserver.go +++ b/internal/cephfs/controllerserver.go @@ -1143,12 +1143,42 @@ func (cs *ControllerServer) ControllerPublishVolume( return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") } - if acquired := cs.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired { - return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId()) + volumeId := req.GetVolumeId() + if acquired := cs.VolumeLocks.TryAcquire(volumeId); !acquired { + return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeId) } - defer cs.VolumeLocks.Release(req.GetVolumeId()) + defer cs.VolumeLocks.Release(volumeId) - err := cs.unfenceNode(ctx, req.GetNodeId(), req.GetVolumeId(), req.GetSecrets()) + secrets := req.GetSecrets() + if secrets == nil { + secretName, secretNamespace, err := util.GetControllerPublishSecretRef(volumeId, util.CephFsType) + if err != nil { + log.WarningLog(ctx, "controller publish secret not found: %v", err) + + // If the secret is not found, return success to not break for older PVs + // without controller-publish secrets. 
+ return &csi.ControllerPublishVolumeResponse{}, nil + } + + secrets, err = k8s.GetSecret(secretName, secretNamespace) + if err != nil { + return nil, fmt.Errorf("failed to get controller publish secret from k8s: %w", err) + } + } + + credentials, err := util.NewAdminCredentials(secrets) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to create credentials from secrets: %v", err) + } + defer credentials.DeleteCredentials() + + volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, volumeId, nil, secrets, cs.ClusterName, cs.SetMetadata) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to generate volume from volume ID %s: %v", volumeId, err) + } + defer volOptions.Destroy() + + err = cs.unfenceNode(ctx, req.GetNodeId(), volOptions, credentials) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -1173,12 +1203,42 @@ func (cs *ControllerServer) ControllerUnpublishVolume( return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") } - if acquired := cs.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired { - return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId()) + volumeId := req.GetVolumeId() + if acquired := cs.VolumeLocks.TryAcquire(volumeId); !acquired { + return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeId) } - defer cs.VolumeLocks.Release(req.GetVolumeId()) + defer cs.VolumeLocks.Release(volumeId) - err := cs.fenceNode(ctx, req.GetNodeId(), req.GetVolumeId(), req.GetSecrets()) + secrets := req.GetSecrets() + if secrets == nil { + secretName, secretNamespace, err := util.GetControllerPublishSecretRef(volumeId, util.CephFsType) + if err != nil { + log.WarningLog(ctx, "controller publish secret not found: %v", err) + + // If the secret is not found, return success to not break for older PVs + // without controller-publish secrets. + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + + secrets, err = k8s.GetSecret(secretName, secretNamespace) + if err != nil { + return nil, fmt.Errorf("failed to get controller publish secret from k8s: %w", err) + } + } + + credentials, err := util.NewAdminCredentials(secrets) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to create credentials from secrets: %v", err) + } + defer credentials.DeleteCredentials() + + volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, volumeId, nil, secrets, cs.ClusterName, cs.SetMetadata) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to generate volume from volume ID %s: %v", volumeId, err) + } + defer volOptions.Destroy() + + err = cs.fenceNode(ctx, req.GetNodeId(), volOptions, credentials) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -1190,9 +1250,8 @@ func (cs *ControllerServer) ControllerUnpublishVolume( // // Parameters: // - nodeId: The ID of the node that may be fenced. -// - volumeId: The CSI volume ID corresponding to the CephFS subvolume. -// - secrets: A map of secrets. If nil, the function attempts to retrieve the -// controller publish secret from the ceph-csi-config ConfigMap. +// - volOptions: The volume options for the CephFS subvolume. +// - credentials: The credentials to access the Ceph cluster. // // Behavior: // - If `IsFencingEnabled` is false, the function returns immediately without @@ -1205,8 +1264,9 @@ func (cs *ControllerServer) ControllerUnpublishVolume( // Returns an error if any step in the fencing process fails. 
func (cs *ControllerServer) fenceNode( ctx context.Context, - nodeId, volumeId string, - secrets map[string]string, + nodeId string, + volOptions *store.VolumeOptions, + credentials *util.Credentials, ) error { if nodeId == "" { return errors.New("node ID cannot be empty") @@ -1214,35 +1274,11 @@ func (cs *ControllerServer) fenceNode( if !cs.Driver.IsFencingEnabled() { return nil } - if secrets == nil { - var vi util.CSIIdentifier - err := vi.DecomposeCSIID(volumeId) - if err != nil { - return fmt.Errorf("error decoding volume ID (%s): %w", volumeId, err) - } - - secrets, err = util.GetControllerPublishSecret(vi.ClusterID, util.CephFsType) - if err != nil { - log.WarningLog(ctx, "controller publish secret not found: %v", err) - - return nil - } - } - credentials, err := util.NewAdminCredentials(secrets) - if err != nil { - return fmt.Errorf("failed to create credentials from secrets: %w", err) - } - defer credentials.DeleteCredentials() - - volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, volumeId, nil, secrets, cs.ClusterName, cs.SetMetadata) - if err != nil { - return fmt.Errorf("failed to generate volume from volume ID %s: %w", volumeId, err) - } - defer volOptions.Destroy() - - volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID, - cs.ClusterName, cs.SetMetadata) + volClient := core.NewSubVolume( + volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID, + cs.ClusterName, cs.SetMetadata, + ) isOutOfService, err := k8s.IsNodeOutOfService(nodeId) if err != nil { return fmt.Errorf("failed to check if node %s is out of service: %w", nodeId, err) @@ -1265,7 +1301,8 @@ func (cs *ControllerServer) fenceNode( metadata, err := volClient.ListMetadata() if err != nil { - return fmt.Errorf("failed to list metadata %s for volumeId %s: %w", metadataKey, volumeId, err) + return fmt.Errorf("failed to list metadata %s for subvolume %s: %w", + metadataKey, volOptions.SubVolume.VolID, err) } clientAddress, exists := metadata[metadataKey] @@ -1273,13 +1310,13 @@ func (cs *ControllerServer) fenceNode( // If the metadata doesn't exist, we do not need to add it to the blocklist // as it does not contain any client address to block. log.DebugLog(ctx, "metadata %s for subvolume %s doesn't exists, skipping unfencing for nodeId %s", - metadataKey, volumeId, nodeId) + metadataKey, volOptions.SubVolume.VolID, nodeId) return nil } if clientAddress == "" { - return fmt.Errorf("client address metadata %s for volumeId %s is empty", metadataKey, volumeId) + return fmt.Errorf("client address metadata %s for subvolume %s is empty", metadataKey, volOptions.SubVolume.VolID) } err = util.AddCephBlocklist(ctx, volOptions.Monitors, credentials, clientAddress, false) @@ -1295,9 +1332,8 @@ func (cs *ControllerServer) fenceNode( // // Parameters: // - nodeId: The ID of the node to be unfenced. -// - volumeId: The CSI volume ID associated with the CephFS subvolume. -// - secrets: A map of secrets. If nil, the function attempts to retrieve the -// controller publish secret from the ceph-csi-config ConfigMap. +// - volOptions: The volume options for the CephFS subvolume. +// - credentials: The credentials to access the Ceph cluster. // // Behavior: // - If `IsFencingEnabled` is false, the function returns immediately without @@ -1310,46 +1346,23 @@ func (cs *ControllerServer) fenceNode( // Returns an error if any step in the unfencing process fails. 
func (cs *ControllerServer) unfenceNode( ctx context.Context, - nodeId, volumeId string, - secrets map[string]string, + nodeId string, + volOptions *store.VolumeOptions, + credentials *util.Credentials, ) error { if !cs.Driver.IsFencingEnabled() { return nil } - if secrets == nil { - var vi util.CSIIdentifier - err := vi.DecomposeCSIID(volumeId) - if err != nil { - return fmt.Errorf("error decoding volume ID (%s): %w", volumeId, err) - } - - secrets, err = util.GetControllerPublishSecret(vi.ClusterID, util.CephFsType) - if err != nil { - log.WarningLog(ctx, "controller publish secret not found: %v", err) - - return nil - } - } - credentials, err := util.NewAdminCredentials(secrets) - if err != nil { - return fmt.Errorf("failed to create credentials from secrets: %w", err) - } - defer credentials.DeleteCredentials() + volClient := core.NewSubVolume( + volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID, + cs.ClusterName, cs.SetMetadata, + ) metadataKey := core.GetClientAddressKey(nodeId) - volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, volumeId, nil, - secrets, cs.ClusterName, cs.SetMetadata) - if err != nil { - return fmt.Errorf("failed to generate volume from volume ID %s: %w", volumeId, err) - } - defer volOptions.Destroy() - - volClient := core.NewSubVolume(volOptions.GetConnection(), - &volOptions.SubVolume, volOptions.ClusterID, cs.ClusterName, cs.SetMetadata) - metadata, err := volClient.ListMetadata() if err != nil { - return fmt.Errorf("failed to list metadata %s for volumeId %s: %w", metadataKey, volumeId, err) + return fmt.Errorf("failed to list metadata %s for subvolume %s: %w", + metadataKey, volOptions.SubVolume.VolID, err) } clientAddress, exists := metadata[metadataKey] @@ -1357,13 +1370,14 @@ func (cs *ControllerServer) unfenceNode( // If the metadata doesn't exist, we do not need to add it to the blocklist // as it does not contain any client address to block. 
log.DebugLog(ctx, "metadata %s for subvolume %s doesn't exists, skipping unfencing for nodeId %s", - metadataKey, volumeId, nodeId) + metadataKey, volOptions.SubVolume.VolID, nodeId) return nil } if clientAddress == "" { - return fmt.Errorf("client address metadata %s for volumeId %s is empty", metadataKey, volumeId) + return fmt.Errorf("client address metadata %s for subvolume %s is empty", + metadataKey, volOptions.SubVolume.VolID) } if err = util.RemoveCephBlocklist(ctx, volOptions.Monitors, credentials, clientAddress, "", false); err != nil { diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 517cae37b8f..dd32e4fc5d8 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -1705,12 +1705,43 @@ func (cs *ControllerServer) ControllerPublishVolume( return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") } - if acquired := cs.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired { - return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId()) + volumeId := req.GetVolumeId() + if acquired := cs.VolumeLocks.TryAcquire(volumeId); !acquired { + return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeId) } - defer cs.VolumeLocks.Release(req.GetVolumeId()) + defer cs.VolumeLocks.Release(volumeId) - err := cs.unfenceNode(ctx, req.GetNodeId(), req.GetVolumeId(), req.GetSecrets()) + secrets := req.GetSecrets() + if secrets == nil { + secretName, secretNamespace, err := util.GetControllerPublishSecretRef(volumeId, util.RBDType) + if err != nil { + log.WarningLog(ctx, "controller publish secret not found: %v", err) + + // If the secret is not found, return success to not break for older PVs + // without controller-publish secrets. 
+ return &csi.ControllerPublishVolumeResponse{}, nil + } + + secrets, err = k8s.GetSecret(secretName, secretNamespace) + if err != nil { + return nil, fmt.Errorf("failed to get controller publish secret from k8s: %w", err) + } + } + + credentials, err := util.NewAdminCredentials(secrets) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to create credentials from secrets: %v", err) + } + defer credentials.DeleteCredentials() + + rv, err := GenVolFromVolID(ctx, volumeId, credentials, secrets) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to generate volume from volume ID %s: %v", + volumeId, err) + } + defer rv.Destroy(ctx) + + err = cs.unfenceNode(ctx, req.GetNodeId(), rv, credentials) if err != nil { return nil, status.Errorf(codes.Internal, "failed to unfence node %s: %v", req.GetNodeId(), err) } @@ -1734,12 +1765,43 @@ func (cs *ControllerServer) ControllerUnpublishVolume( return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") } - if acquired := cs.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired { - return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId()) + volumeId := req.GetVolumeId() + if acquired := cs.VolumeLocks.TryAcquire(volumeId); !acquired { + return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeId) } - defer cs.VolumeLocks.Release(req.GetVolumeId()) + defer cs.VolumeLocks.Release(volumeId) - err := cs.fenceNode(ctx, req.GetNodeId(), req.GetVolumeId(), req.GetSecrets()) + secrets := req.GetSecrets() + if secrets == nil { + secretName, secretNamespace, err := util.GetControllerPublishSecretRef(volumeId, util.RBDType) + if err != nil { + log.WarningLog(ctx, "controller publish secret not found: %v", err) + + // If the secret is not found, return success to not break for older PVs + // without controller-publish secrets. + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + + secrets, err = k8s.GetSecret(secretName, secretNamespace) + if err != nil { + return nil, fmt.Errorf("failed to get controller publish secret from k8s: %w", err) + } + } + + credentials, err := util.NewAdminCredentials(secrets) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to create credentials from secrets: %v", err) + } + defer credentials.DeleteCredentials() + + rv, err := GenVolFromVolID(ctx, volumeId, credentials, secrets) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to generate volume from volume ID %s: %v", + volumeId, err) + } + defer rv.Destroy(ctx) + + err = cs.fenceNode(ctx, req.GetNodeId(), rv, credentials) if err != nil { return nil, status.Errorf(codes.Internal, "failed to fence node %s: %v", req.GetNodeId(), err) } @@ -1751,9 +1813,8 @@ func (cs *ControllerServer) ControllerUnpublishVolume( // // Parameters: // - nodeId: The ID of the node that may be fenced. -// - volumeId: The CSI volume ID corresponding to the RBD image. -// - secrets: A map of secrets. If nil, the function attempts to retrieve the -// controller publish secret from the ceph-csi-config ConfigMap. +// - rbdVolume: The rbdVolume object representing the RBD image. +// - credentials: The credentials to access the Ceph cluster. // // Behavior: // - If `IsFencingEnabled` is false, the function returns immediately @@ -1766,8 +1827,9 @@ func (cs *ControllerServer) ControllerUnpublishVolume( // Returns an error if any step in the fencing process fails. 
func (cs *ControllerServer) fenceNode( ctx context.Context, - nodeId, volumeId string, - secrets map[string]string, + nodeId string, + rv *rbdVolume, + credentials *util.Credentials, ) error { if !cs.Driver.IsFencingEnabled() { return nil @@ -1776,34 +1838,7 @@ func (cs *ControllerServer) fenceNode( return errors.New("nodeId cannot be empty") } - if secrets == nil { - var vi util.CSIIdentifier - err := vi.DecomposeCSIID(volumeId) - if err != nil { - return fmt.Errorf("failed to decode volume ID (%s): %w", volumeId, err) - } - - secrets, err = util.GetControllerPublishSecret(vi.ClusterID, util.RBDType) - if err != nil { - log.WarningLog(ctx, "controller publish secret not found: %v", err) - - return nil - } - } - metadataKey := getClientAddressKey(nodeId) - credentials, err := util.NewAdminCredentials(secrets) - if err != nil { - return fmt.Errorf("failed to create credentials from secrets: %w", err) - } - defer credentials.DeleteCredentials() - - rv, err := GenVolFromVolID(ctx, volumeId, credentials, secrets) - if err != nil { - return fmt.Errorf("failed to generate volume from volume ID %q: %w", - volumeId, err) - } - isOutOfService, err := k8s.IsNodeOutOfService(nodeId) if err != nil { return fmt.Errorf("failed to check if node %s is out of service: %w", nodeId, err) @@ -1854,9 +1889,8 @@ func (cs *ControllerServer) fenceNode( // // Parameters: // - nodeId: The ID of the node to be unfenced. -// - volumeId: The CSI volume ID associated with the RBD image. -// - secrets: A map of secrets. If nil, the function attempts to retrieve the -// controller publish secret from the ceph-csi-config ConfigMap. +// - rbdVolume: The rbdVolume object representing the RBD image. +// - credentials: The credentials to access the Ceph cluster. // // Behavior: // - If `IsFencingEnabled` is false, the function returns immediately @@ -1868,39 +1902,15 @@ func (cs *ControllerServer) fenceNode( // Returns an error if any step in the unfencing process fails. 
func (cs *ControllerServer) unfenceNode( ctx context.Context, - nodeId, volumeId string, - secrets map[string]string, + nodeId string, + rv *rbdVolume, + credentials *util.Credentials, ) error { if !cs.Driver.IsFencingEnabled() { return nil } - if secrets == nil { - var vi util.CSIIdentifier - err := vi.DecomposeCSIID(volumeId) - if err != nil { - return fmt.Errorf("failed to decode volume ID (%s): %w", volumeId, err) - } - - secrets, err = util.GetControllerPublishSecret(vi.ClusterID, util.RBDType) - if err != nil { - log.WarningLog(ctx, "controller publish secret not found: %v", err) - - return nil - } - } metadataKey := getClientAddressKey(nodeId) - credentials, err := util.NewAdminCredentials(secrets) - if err != nil { - return fmt.Errorf("failed to create credentials from secrets: %w", err) - } - defer credentials.DeleteCredentials() - - rv, err := GenVolFromVolID(ctx, volumeId, credentials, secrets) - if err != nil { - return fmt.Errorf("failed to generate volume from volume ID %s: %w", volumeId, err) - } - clientAddress, err := rv.GetMetadata(metadataKey) if err != nil { if errors.Is(err, librbd.ErrNotFound) { diff --git a/internal/util/util.go b/internal/util/util.go index 76178f742f9..757a7d69df1 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -327,10 +327,19 @@ func ParseClientIP(addr string) (string, error) { return "", fmt.Errorf("failed to extract IP address, incorrect format: %s", addr) } -// GetControllerPublishSecret retrieves the controller publish secret from ceph-csi-config ConfigMap +// GetControllerPublishSecretRef retrieves the controller publish secret from ceph-csi-config ConfigMap // for a given clusterID. Fetches the secret from Kubernetes, and returns it as a map of key-value pairs. -func GetControllerPublishSecret(clusterID, driverType string) (map[string]string, error) { - var getSecretRefFunc func(string, string) (string, string, error) +func GetControllerPublishSecretRef(volumeId, driverType string) (string, string, error) { + var ( + vi CSIIdentifier + secretName string + secretNamespace string + getSecretRefFunc func(string, string) (string, string, error) + ) + err := vi.DecomposeCSIID(volumeId) + if err != nil { + return secretName, secretNamespace, fmt.Errorf("failed to decode volume ID (%s): %w", volumeId, err) + } switch driverType { case RBDType: @@ -338,26 +347,19 @@ func GetControllerPublishSecret(clusterID, driverType string) (map[string]string case CephFsType: getSecretRefFunc = GetCephFSControllerPublishSecretRef default: - return nil, fmt.Errorf("unsupported driver type: %s", driverType) + return secretName, secretNamespace, fmt.Errorf("unsupported driver type: %s", driverType) } - secretName, secretNamespace, err := getSecretRefFunc(CsiConfigFile, clusterID) + secretName, secretNamespace, err = getSecretRefFunc(CsiConfigFile, vi.ClusterID) if err != nil { - return nil, fmt.Errorf("failed to get controller publish secret details from csi config file: %w", err) + return secretName, secretNamespace, + fmt.Errorf("failed to get controller publish secret details from csi config file: %w", err) } if secretName == "" || secretNamespace == "" { - return nil, fmt.Errorf("controller publish secret name or namespace is empty"+ - " in csi config file for cluster %s", clusterID) - } - - secrets, err := k8s.GetSecret(secretName, secretNamespace) - if err != nil { - return nil, fmt.Errorf("failed to get controller publish secret from k8s: %w", err) - } - if secrets == nil { - return nil, errors.New("controller publish secret is empty in 
k8s") + return secretName, secretNamespace, fmt.Errorf("controller publish secret name or namespace is empty"+ + " in csi config file for cluster %s", vi.ClusterID) } - return secrets, nil + return secretName, secretNamespace, nil } From f26b74e0a826ecbc1ad32ea94b21291f8c0a5408 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 14 Aug 2025 13:22:00 +0530 Subject: [PATCH 009/157] cephfs: store clientaddress metadata to snapshot-backed volume Problem: For a snapshot-backed PVC, there is no actual subvolume in the Ceph cluster. Attempting to store metadata would fail as the decoded subvolume name is not found in the cluster. Solution: For a snapshot-backed PVC, store the metadata in its snapshot using the volumeHandle to uniquely identify it. The clientaddress metadata format is now `.cephfs.csi.ceph.com/clientaddress//`. Signed-off-by: Praveen M --- internal/cephfs/controllerserver.go | 155 +++++++++++++++------- internal/cephfs/core/metadata.go | 6 +- internal/cephfs/core/snapshot.go | 2 + internal/cephfs/core/snapshot_metadata.go | 37 ++++++ internal/cephfs/nodeserver.go | 41 +++++- 5 files changed, 181 insertions(+), 60 deletions(-) diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go index 3827b4f0537..2936a8e4fe6 100644 --- a/internal/cephfs/controllerserver.go +++ b/internal/cephfs/controllerserver.go @@ -68,6 +68,13 @@ type ControllerServer struct { SetMetadata bool } +// subvolumeMetadataHandler holds the metadata handling functions. +type subvolumeMetadataHandler struct { + logSubvolumeName string + unsetAllMetadataFunc func([]string) error + listMetadataFunc func() (map[string]string, error) +} + // createBackingVolume creates the backing subvolume and on any error cleans up any created entities. func (cs *ControllerServer) createBackingVolume( ctx context.Context, @@ -1166,19 +1173,13 @@ func (cs *ControllerServer) ControllerPublishVolume( } } - credentials, err := util.NewAdminCredentials(secrets) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to create credentials from secrets: %v", err) - } - defer credentials.DeleteCredentials() - volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, volumeId, nil, secrets, cs.ClusterName, cs.SetMetadata) if err != nil { return nil, status.Errorf(codes.Internal, "failed to generate volume from volume ID %s: %v", volumeId, err) } defer volOptions.Destroy() - err = cs.unfenceNode(ctx, req.GetNodeId(), volOptions, credentials) + err = cs.unfenceNode(ctx, volumeId, req.GetNodeId(), secrets, volOptions) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -1226,19 +1227,13 @@ func (cs *ControllerServer) ControllerUnpublishVolume( } } - credentials, err := util.NewAdminCredentials(secrets) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to create credentials from secrets: %v", err) - } - defer credentials.DeleteCredentials() - volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, volumeId, nil, secrets, cs.ClusterName, cs.SetMetadata) if err != nil { return nil, status.Errorf(codes.Internal, "failed to generate volume from volume ID %s: %v", volumeId, err) } defer volOptions.Destroy() - err = cs.fenceNode(ctx, req.GetNodeId(), volOptions, credentials) + err = cs.fenceNode(ctx, volumeId, req.GetNodeId(), secrets, volOptions) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -1246,12 +1241,57 @@ func (cs *ControllerServer) ControllerUnpublishVolume( return &csi.ControllerUnpublishVolumeResponse{}, nil } +// 
getSubvolumeMetadataHandler returns the appropriate metadata handler based on whether +// the volume is a backing snapshot or a regular subvolume. +func (cs *ControllerServer) getSubvolumeMetadataHandler( + ctx context.Context, + volOptions *store.VolumeOptions, + secrets map[string]string, +) (*subvolumeMetadataHandler, error) { + conn := volOptions.GetConnection() + cr, err := util.NewAdminCredentials(secrets) + if err != nil { + return nil, fmt.Errorf("failed to create credentials from secrets: %w", err) + } + defer cr.DeleteCredentials() + + if volOptions.BackingSnapshot { + sVolOpts, _, sid, err := store.NewSnapshotOptionsFromID( + ctx, volOptions.BackingSnapshotID, cr, secrets, cs.ClusterName, cs.SetMetadata, + ) + if err != nil { + return nil, err + } + defer sVolOpts.Destroy() + snapClient := core.NewSnapshot( + conn, sid.FsSnapshotName, sVolOpts.ClusterID, cs.ClusterName, cs.SetMetadata, &sVolOpts.SubVolume, + ) + + return &subvolumeMetadataHandler{ + logSubvolumeName: fmt.Sprintf("subvolume snapshot %s/%s", sid.FsSubvolName, sid.FsSnapshotName), + unsetAllMetadataFunc: snapClient.UnsetAllSnapshotMetadata, + listMetadataFunc: snapClient.ListSnapshotMetadata, + }, nil + } + + subvolumeClient := core.NewSubVolume( + conn, &volOptions.SubVolume, volOptions.ClusterID, cs.ClusterName, cs.SetMetadata, + ) + + return &subvolumeMetadataHandler{ + logSubvolumeName: "subvolume " + volOptions.SubVolume.VolID, + unsetAllMetadataFunc: subvolumeClient.UnsetAllMetadata, + listMetadataFunc: subvolumeClient.ListMetadata, + }, nil +} + // fenceNode attempts to fence a client node from accessing the CephFS subvolume. // // Parameters: +// - volumeId: The ID of the volume that may be fenced. // - nodeId: The ID of the node that may be fenced. +// - secrets: The secrets to access the Ceph cluster. // - volOptions: The volume options for the CephFS subvolume. -// - credentials: The credentials to access the Ceph cluster. // // Behavior: // - If `IsFencingEnabled` is false, the function returns immediately without @@ -1264,9 +1304,9 @@ func (cs *ControllerServer) ControllerUnpublishVolume( // Returns an error if any step in the fencing process fails. func (cs *ControllerServer) fenceNode( ctx context.Context, - nodeId string, + volumeId, nodeId string, + secrets map[string]string, volOptions *store.VolumeOptions, - credentials *util.Credentials, ) error { if nodeId == "" { return errors.New("node ID cannot be empty") @@ -1275,51 +1315,58 @@ func (cs *ControllerServer) fenceNode( return nil } - volClient := core.NewSubVolume( - volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID, - cs.ClusterName, cs.SetMetadata, - ) + cr, err := util.NewAdminCredentials(secrets) + if err != nil { + return fmt.Errorf("failed to create credentials from secrets: %w", err) + } + defer cr.DeleteCredentials() + isOutOfService, err := k8s.IsNodeOutOfService(nodeId) if err != nil { return fmt.Errorf("failed to check if node %s is out of service: %w", nodeId, err) } - metadataKey := core.GetClientAddressKey(nodeId) + handler, err := cs.getSubvolumeMetadataHandler(ctx, volOptions, secrets) + if err != nil { + return err + } + + metadataKey := core.GetClientAddressKey(volumeId, nodeId) if !isOutOfService { // If the node is not tainted with `out-of-service`, // we remove the client address metadata from the volume metadata. 
- err = volClient.UnsetAllMetadata([]string{metadataKey}) + err = handler.unsetAllMetadataFunc([]string{metadataKey}) if err != nil && !errors.Is(err, core.ErrSubVolMetadataNotSupported) { return fmt.Errorf("failed to remove metadata %s from subvolume %s: %w", metadataKey, volOptions.VolID, err) } - log.DebugLog(ctx, "node %s is not out of service, removed metadata %s from subvolume %s if it existed", - nodeId, metadataKey, volOptions.VolID) + log.DebugLog(ctx, "node %s is not out of service, removed metadata %s from %s if it existed", + nodeId, metadataKey, handler.logSubvolumeName) return nil } - metadata, err := volClient.ListMetadata() + metadata, err := handler.listMetadataFunc() if err != nil { - return fmt.Errorf("failed to list metadata %s for subvolume %s: %w", - metadataKey, volOptions.SubVolume.VolID, err) + return fmt.Errorf("failed to list metadata %s for %s: %w", + metadataKey, handler.logSubvolumeName, err) } clientAddress, exists := metadata[metadataKey] if !exists { // If the metadata doesn't exist, we do not need to add it to the blocklist // as it does not contain any client address to block. - log.DebugLog(ctx, "metadata %s for subvolume %s doesn't exists, skipping unfencing for nodeId %s", - metadataKey, volOptions.SubVolume.VolID, nodeId) + log.DebugLog(ctx, "metadata %s for %s doesn't exists, skipping unfencing for nodeId %s", + metadataKey, handler.logSubvolumeName, nodeId) return nil } if clientAddress == "" { - return fmt.Errorf("client address metadata %s for subvolume %s is empty", metadataKey, volOptions.SubVolume.VolID) + return fmt.Errorf("client address metadata %s for %s is empty", metadataKey, handler.logSubvolumeName) } - err = util.AddCephBlocklist(ctx, volOptions.Monitors, credentials, clientAddress, false) + err = util.AddCephBlocklist(ctx, volOptions.Monitors, cr, clientAddress, false) if err != nil { return fmt.Errorf("failed to add client address to blocklist: %w", err) } @@ -1331,9 +1378,10 @@ func (cs *ControllerServer) fenceNode( // allowing it to access the specified CephFS subvolume again. // // Parameters: +// - volumeId: The ID of the volume to be unfenced. // - nodeId: The ID of the node to be unfenced. +// - secrets: The secrets to access the Ceph cluster. // - volOptions: The volume options for the CephFS subvolume. -// - credentials: The credentials to access the Ceph cluster. // // Behavior: // - If `IsFencingEnabled` is false, the function returns immediately without @@ -1346,46 +1394,53 @@ func (cs *ControllerServer) fenceNode( // Returns an error if any step in the unfencing process fails. 
func (cs *ControllerServer) unfenceNode( ctx context.Context, - nodeId string, + volumeId, nodeId string, + secrets map[string]string, volOptions *store.VolumeOptions, - credentials *util.Credentials, ) error { if !cs.Driver.IsFencingEnabled() { return nil } - volClient := core.NewSubVolume( - volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID, - cs.ClusterName, cs.SetMetadata, - ) - metadataKey := core.GetClientAddressKey(nodeId) - metadata, err := volClient.ListMetadata() + cr, err := util.NewAdminCredentials(secrets) + if err != nil { + return fmt.Errorf("failed to create credentials from secrets: %w", err) + } + defer cr.DeleteCredentials() + + handler, err := cs.getSubvolumeMetadataHandler(ctx, volOptions, secrets) + if err != nil { + return err + } + + metadataKey := core.GetClientAddressKey(volumeId, nodeId) + metadata, err := handler.listMetadataFunc() if err != nil { - return fmt.Errorf("failed to list metadata %s for subvolume %s: %w", - metadataKey, volOptions.SubVolume.VolID, err) + return fmt.Errorf("failed to list metadata %s for %s: %w", + metadataKey, handler.logSubvolumeName, err) } clientAddress, exists := metadata[metadataKey] if !exists { // If the metadata doesn't exist, we do not need to add it to the blocklist // as it does not contain any client address to block. - log.DebugLog(ctx, "metadata %s for subvolume %s doesn't exists, skipping unfencing for nodeId %s", - metadataKey, volOptions.SubVolume.VolID, nodeId) + log.DebugLog(ctx, "metadata %s for %s doesn't exists, skipping unfencing for nodeId %s", + metadataKey, handler.logSubvolumeName, nodeId) return nil } if clientAddress == "" { - return fmt.Errorf("client address metadata %s for subvolume %s is empty", - metadataKey, volOptions.SubVolume.VolID) + return fmt.Errorf("client address metadata %s for %s is empty", + metadataKey, handler.logSubvolumeName) } - if err = util.RemoveCephBlocklist(ctx, volOptions.Monitors, credentials, clientAddress, "", false); err != nil { + if err = util.RemoveCephBlocklist(ctx, volOptions.Monitors, cr, clientAddress, "", false); err != nil { return fmt.Errorf("failed to remove client address %s from blocklist: %w", clientAddress, err) } - if err = volClient.UnsetAllMetadata([]string{metadataKey}); err != nil { - return fmt.Errorf("failed to remove metadata %s from subvolume %s: %w", metadataKey, volOptions.VolID, err) + if err = handler.unsetAllMetadataFunc([]string{metadataKey}); err != nil { + return fmt.Errorf("failed to remove metadata %s from %s: %w", metadataKey, handler.logSubvolumeName, err) } return nil diff --git a/internal/cephfs/core/metadata.go b/internal/cephfs/core/metadata.go index a86f551e2e6..5d462476ada 100644 --- a/internal/cephfs/core/metadata.go +++ b/internal/cephfs/core/metadata.go @@ -30,7 +30,7 @@ const ( // clientAddressKey is the key used to store the client address. // starts with `.` to avoid copying it to the mirrored subvolume. - clientAddressKey = ".cephfs.csi.ceph.com/clientAddress" + clientAddressKey = ".cephfs.csi.ceph.com/clientaddress" ) // ErrSubVolMetadataNotSupported is returned when set/get/list/remove subvolume metadata options are not supported. @@ -38,8 +38,8 @@ var ErrSubVolMetadataNotSupported = errors.New("subvolume metadata operations ar // GetClientAddressKey returns the key to store the client address in the // subvolume metadata. 
-func GetClientAddressKey(nodeId string) string {
-	return fmt.Sprintf("%s/%s", clientAddressKey, nodeId)
+func GetClientAddressKey(volumeId, nodeId string) string {
+	return fmt.Sprintf("%s/%s/%s", clientAddressKey, volumeId, nodeId)
 }
 
 func (s *subVolumeClient) supportsSubVolMetadata() bool {
diff --git a/internal/cephfs/core/snapshot.go b/internal/cephfs/core/snapshot.go
index b6d37548910..57f2a4e8f14 100644
--- a/internal/cephfs/core/snapshot.go
+++ b/internal/cephfs/core/snapshot.go
@@ -47,6 +47,8 @@ type SnapshotClient interface {
 	// UnsetAllSnapshotMetadata unset all the metadata from arg keys on
 	// subvolume snapshot.
 	UnsetAllSnapshotMetadata(keys []string) error
+	// ListSnapshotMetadata lists all the metadata on subvolume snapshot.
+	ListSnapshotMetadata() (map[string]string, error)
 }
 
 // snapshotClient is the implementation of SnapshotClient interface.
diff --git a/internal/cephfs/core/snapshot_metadata.go b/internal/cephfs/core/snapshot_metadata.go
index b5cb5457b9f..9b122e1b3c2 100644
--- a/internal/cephfs/core/snapshot_metadata.go
+++ b/internal/cephfs/core/snapshot_metadata.go
@@ -86,6 +86,24 @@ func (s *snapshotClient) removeSnapshotMetadata(key string) error {
 	return err
 }
 
+// listSnapshotMetadata lists custom metadata set on the subvolume snapshot
+// and returns a map of key-value pairs.
+func (s *snapshotClient) listSnapshotMetadata() (map[string]string, error) {
+	if !s.supportsSubVolSnapMetadata() {
+		return nil, ErrSubVolSnapMetadataNotSupported
+	}
+	fsa, err := s.conn.GetFSAdmin()
+	if err != nil {
+		return nil, err
+	}
+	metadata, err := fsa.ListSnapshotMetadata(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
+	if !s.isUnsupportedSubVolSnapMetadata(err) {
+		return nil, ErrSubVolSnapMetadataNotSupported
+	}
+
+	return metadata, err
+}
+
 // SetAllSnapshotMetadata set all the metadata from arg parameters on
 // subvolume snapshot.
 func (s *snapshotClient) SetAllSnapshotMetadata(parameters map[string]string) error {
@@ -135,3 +153,22 @@ func (s *snapshotClient) UnsetAllSnapshotMetadata(keys []string) error {
 
 	return nil
 }
+
+// ListSnapshotMetadata lists all the metadata on subvolume snapshot.
+func (s *snapshotClient) ListSnapshotMetadata() (map[string]string, error) {
+	if !s.enableMetadata {
+		return nil, nil
+	}
+
+	metadata, err := s.listSnapshotMetadata()
+	// If listSnapshotMetadata is not supported return nil
+	if errors.Is(err, ErrSubVolSnapMetadataNotSupported) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to list metadata on subvolume snapshot %s %s in fs %s: %w",
+			s.SnapshotID, s.VolID, s.FsName, err)
+	}
+
+	return metadata, nil
+}
diff --git a/internal/cephfs/nodeserver.go b/internal/cephfs/nodeserver.go
index 704b28c3b55..c7d1ff4acbb 100644
--- a/internal/cephfs/nodeserver.go
+++ b/internal/cephfs/nodeserver.go
@@ -274,7 +274,7 @@ func (ns *NodeServer) NodeStageVolume(
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	err = ns.setClientAddress(ctx, volOptions)
+	err = ns.setClientAddress(ctx, req.GetVolumeId(), req.GetSecrets(), volOptions)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -336,17 +336,18 @@ func (ns *NodeServer) NodeStageVolume(
 // setClientAddress extracts the client IP address and stores it in the subvolume metadata.
 // The connection.GetAddrs() method returns an address in the format "10.244.0.1:0/2686266785".
 // We parse this to extract just the IP address portion (e.g., "10.244.0.1") and store in metadata.
-// If '--enable-fencing' flag is set to false in CSI driver configuration, this function does nothing.
+// If the '--enable-fencing' flag is set to false in CSI driver configuration or if the volume is static
+// this function does nothing.
 func (ns *NodeServer) setClientAddress(
 	ctx context.Context,
+	volumeId string,
+	secrets map[string]string,
 	volOptions *store.VolumeOptions,
 ) error {
-	if !ns.Driver.IsFencingEnabled() {
+	if !ns.Driver.IsFencingEnabled() || !volOptions.ProvisionVolume {
 		return nil
 	}
 
-	nodeId := ns.Driver.GetNodeID()
-	metadataKey := core.GetClientAddressKey(nodeId)
 	conn := volOptions.GetConnection()
 	address, err := conn.GetAddrs()
 	if err != nil {
@@ -360,11 +361,37 @@ func (ns *NodeServer) setClientAddress(
 		return fmt.Errorf("failed to parse client address: %w", err)
 	}
 
-	subvolumeClient := core.NewSubVolume(conn, &volOptions.SubVolume, volOptions.ClusterID, "", true)
+	nodeId := ns.Driver.GetNodeID()
+	metadataKey := core.GetClientAddressKey(volumeId, nodeId)
 	params := map[string]string{metadataKey: ipAddress}
+	if volOptions.BackingSnapshot {
+		cr, err := util.NewAdminCredentials(secrets)
+		if err != nil {
+			return fmt.Errorf("failed to create credentials from secrets: %w", err)
+		}
+		defer cr.DeleteCredentials()
+
+		volOpt, _, sid, err := store.NewSnapshotOptionsFromID(ctx, volOptions.BackingSnapshotID, cr, secrets, "", true)
+		if err != nil {
+			return err
+		}
+		defer volOpt.Destroy()
+		snapClient := core.NewSnapshot(conn, sid.FsSnapshotName, volOpt.ClusterID, "", true, &volOpt.SubVolume)
+		if err = snapClient.SetAllSnapshotMetadata(params); err != nil {
+			return fmt.Errorf("failed to set client address metadata %s for subvolume snapshot %s/%s: %w",
+				metadataKey, sid.FsSubvolName, sid.FsSnapshotName, err)
+		}
+
+		log.DebugLog(ctx, "client address %s set in metadata %s for subvolume snapshot %s/%s",
+			address, metadataKey, sid.FsSubvolName, sid.FsSnapshotName)
+
+		return nil
+	}
+
+	subvolumeClient := core.NewSubVolume(conn, &volOptions.SubVolume, volOptions.ClusterID, "", true)
 	err = subvolumeClient.SetAllMetadata(params)
 	if err != nil {
-		log.ErrorLog(ctx, "failed to set client address for subvolume %s: %v", volOptions.VolID, err)
+		log.ErrorLog(ctx, "failed to set client address for subvolume %s: %v", volOptions.SubVolume.VolID, err)
 	}
 
 	log.DebugLog(ctx, "client address %s set in metadata %s for subvolume %s",

From 6097606cd37d8e910546ec6102ccb7aff2715157 Mon Sep 17 00:00:00 2001
From: Praveen M 
Date: Thu, 14 Aug 2025 13:57:32 +0530
Subject: [PATCH 010/157] rbd: include volumeId in clientaddress metadata for
 consistency

For CephFS read-only PVCs created from snapshots, metadata is stored
in the snapshot and identified using the volumeHandle. The clientaddress
metadata format there is:
'.cephfs.csi.ceph.com/clientaddress/<volumeId>/<nodeId>'

This change updates the clientaddress metadata format for RBD to also
include the volumeId, aligning it with the CephFS format:
'.rbd.csi.ceph.com/clientaddress/<volumeId>/<nodeId>'.
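As a small, self-contained sketch of the shared key layout described above
(names are illustrative; the real helpers are GetClientAddressKey in
internal/cephfs/core/metadata.go and getClientAddressKey in
internal/rbd/rbd_util.go):

package main

import "fmt"

const (
	cephfsClientAddressKey = ".cephfs.csi.ceph.com/clientaddress"
	rbdClientAddressKey    = ".rbd.csi.ceph.com/clientaddress"
)

// clientAddressMetadataKey builds the per-volume, per-node metadata key that
// both drivers now share: <prefix>/<volumeId>/<nodeId>.
func clientAddressMetadataKey(prefix, volumeId, nodeId string) string {
	return fmt.Sprintf("%s/%s/%s", prefix, volumeId, nodeId)
}

func main() {
	// Example output (the volume handle and node name are made up):
	// .rbd.csi.ceph.com/clientaddress/0001-0009-examplevolid/worker-1
	fmt.Println(clientAddressMetadataKey(rbdClientAddressKey, "0001-0009-examplevolid", "worker-1"))
}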
Signed-off-by: Praveen M
---
 internal/rbd/controllerserver.go |  4 ++--
 internal/rbd/driver/driver.go    |  2 --
 internal/rbd/nodeserver.go       | 10 ++++++----
 internal/rbd/rbd_util.go         |  6 +++---
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go
index dd32e4fc5d8..ffb084a6320 100644
--- a/internal/rbd/controllerserver.go
+++ b/internal/rbd/controllerserver.go
@@ -1838,7 +1838,7 @@ func (cs *ControllerServer) fenceNode(
 		return errors.New("nodeId cannot be empty")
 	}
 
-	metadataKey := getClientAddressKey(nodeId)
+	metadataKey := getClientAddressKey(rv.VolID, nodeId)
 	isOutOfService, err := k8s.IsNodeOutOfService(nodeId)
 	if err != nil {
 		return fmt.Errorf("failed to check if node %s is out of service: %w", nodeId, err)
@@ -1910,7 +1910,7 @@ func (cs *ControllerServer) unfenceNode(
 		return nil
 	}
 
-	metadataKey := getClientAddressKey(nodeId)
+	metadataKey := getClientAddressKey(rv.VolID, nodeId)
 	clientAddress, err := rv.GetMetadata(metadataKey)
 	if err != nil {
 		if errors.Is(err, librbd.ErrNotFound) {
diff --git a/internal/rbd/driver/driver.go b/internal/rbd/driver/driver.go
index bf40362eb5f..cb9c1330878 100644
--- a/internal/rbd/driver/driver.go
+++ b/internal/rbd/driver/driver.go
@@ -87,8 +87,6 @@ func NewNodeServer(
 //
 // This also configures and starts a new CSI-Addons service, by calling
 // setupCSIAddonsServer().
-//
-//nolint:gocyclo,cyclop // TODO: reduce complexity
 func (r *Driver) Run(conf *util.Config) {
 	var (
 		err error
diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go
index ae3dea9a17d..9c520883237 100644
--- a/internal/rbd/nodeserver.go
+++ b/internal/rbd/nodeserver.go
@@ -400,7 +400,7 @@ func (ns *NodeServer) NodeStageVolume(
 	}
 
 	// Set ClientAddress in the image metadata
-	err = ns.setClientAddress(ctx, cr, rv)
+	err = ns.setClientAddress(ctx, cr, rv, isStaticVol)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "failed to set client address for %s: %v", rv, err)
 	}
@@ -435,18 +435,20 @@ func (ns *NodeServer) NodeStageVolume(
 // setClientAddress extracts the client IP address and stores it in the RBD image metadata.
 // The connection.GetAddrs() method returns an address in the format "10.244.0.1:0/2686266785".
 // We parse this to extract just the IP address portion (e.g., "10.244.0.1") and store in metadata.
-// If '--enable-fencing' flag is set to false in CSI driver configuration, this function does nothing.
+// If the '--enable-fencing' flag is set to false in CSI driver configuration, or if the volume is static,
+// this function does nothing.
 func (ns *NodeServer) setClientAddress(
 	ctx context.Context,
 	cr *util.Credentials,
 	rv *rbdVolume,
+	isStaticVol bool,
 ) error {
-	if !ns.Driver.IsFencingEnabled() {
+	if !ns.Driver.IsFencingEnabled() || isStaticVol {
 		return nil
 	}
 
 	nodeId := ns.Driver.GetNodeID()
-	metadataKey := getClientAddressKey(nodeId)
+	metadataKey := getClientAddressKey(rv.VolID, nodeId)
 	monitors, _ /* clusterID*/, err := util.GetMonsAndClusterID(ctx, rv.ClusterID, false)
 	if err != nil {
 		return fmt.Errorf("failed to get monitors for cluster %s: %w", rv.ClusterID, err)
diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go
index 4c857929cfb..ef454fa7c76 100644
--- a/internal/rbd/rbd_util.go
+++ b/internal/rbd/rbd_util.go
@@ -89,7 +89,7 @@ const (
 
 	// clientAddressKey is the key used to store the client address.
 	// starts with `.` to avoid copying it to the mirrored image.
- clientAddressKey = ".rbd.csi.ceph.com/clientAddress" + clientAddressKey = ".rbd.csi.ceph.com/clientaddress" // Suffix added to the temp cloned image name. // This will always be (rbd image name + "-temp"). @@ -287,8 +287,8 @@ var ( ) // getClientAddressKey returns the key for storing client address metadata for a specific node. -func getClientAddressKey(nodeId string) string { - return fmt.Sprintf("%s/%s", clientAddressKey, nodeId) +func getClientAddressKey(volumeId, nodeId string) string { + return fmt.Sprintf("%s/%s/%s", clientAddressKey, volumeId, nodeId) } // prepareKrbdFeatureAttrs prepare krbd fearure set based on kernel version. From b2ba71a539cbfca32a6b47aa929b836463542002 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 24 Jul 2025 18:10:54 +0530 Subject: [PATCH 011/157] helm: update templates for fencing Signed-off-by: Praveen M --- charts/ceph-csi-cephfs/README.md | 2 ++ .../ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml | 3 +++ .../templates/provisioner-clusterrole.yaml | 6 ++++++ .../templates/provisioner-deployment.yaml | 3 +++ charts/ceph-csi-cephfs/templates/storageclass.yaml | 6 ++++++ charts/ceph-csi-cephfs/values.yaml | 8 ++++++++ charts/ceph-csi-rbd/README.md | 2 ++ charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml | 3 +++ .../ceph-csi-rbd/templates/provisioner-deployment.yaml | 3 +++ charts/ceph-csi-rbd/templates/storageclass.yaml | 6 ++++++ charts/ceph-csi-rbd/values.yaml | 8 ++++++++ scripts/install-helm.sh | 9 +++++++-- 12 files changed, 57 insertions(+), 2 deletions(-) diff --git a/charts/ceph-csi-cephfs/README.md b/charts/ceph-csi-cephfs/README.md index 4040d704cbf..4d18e388ce5 100644 --- a/charts/ceph-csi-cephfs/README.md +++ b/charts/ceph-csi-cephfs/README.md @@ -216,6 +216,8 @@ charts and their default values. 
| `storageClass.provisionerSecretNamespace` | Specifies the provisioner secret namespace | `""` | | `storageClass.controllerExpandSecret` | Specifies the controller expand secret name | `csi-cephfs-secret` | | `storageClass.controllerExpandSecretNamespace` | Specifies the controller expand secret namespace | `""` | +| `storageClass.controllerPublishSecret` | Specifies the controller publish secret name | `csi-cephfs-secret` | +| `storageClass.controllerPublishSecretNamespace`| Specifies the controller publish secret namespace | `""` | | `storageClass.nodeStageSecret` | Specifies the node stage secret name | `csi-cephfs-secret` | | `storageClass.nodeStageSecretNamespace` | Specifies the node stage secret namespace | `""` | | `storageClass.reclaimPolicy` | Specifies the reclaim policy of the StorageClass | `Delete` | diff --git a/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml b/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml index df8e6648000..fe331b848d7 100644 --- a/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml +++ b/charts/ceph-csi-cephfs/templates/nodeplugin-daemonset.yaml @@ -80,6 +80,9 @@ spec: - "--enable-read-affinity={{ and .Values.readAffinity .Values.readAffinity.enabled | default false }}" {{- if and .Values.readAffinity .Values.readAffinity.enabled }} - "--crush-location-labels={{ .Values.readAffinity.crushLocationLabels | join "," }}" +{{- end }} +{{- if .Values.nodeplugin.fencing }} + - "--enable-fencing={{ .Values.nodeplugin.fencing }}" {{- end }} - "--logslowopinterval={{ .Values.logSlowOperationInterval }}" env: diff --git a/charts/ceph-csi-cephfs/templates/provisioner-clusterrole.yaml b/charts/ceph-csi-cephfs/templates/provisioner-clusterrole.yaml index 4b838bb0e9e..4f0e4188ad6 100644 --- a/charts/ceph-csi-cephfs/templates/provisioner-clusterrole.yaml +++ b/charts/ceph-csi-cephfs/templates/provisioner-clusterrole.yaml @@ -66,6 +66,12 @@ rules: verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] {{- end -}} {{- if .Values.provisioner.attacher.enabled }} + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list","watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update", "patch"] diff --git a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml index ccacd84e703..93335c89dc7 100644 --- a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml +++ b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml @@ -99,6 +99,9 @@ spec: {{- end }} {{- if .Values.provisioner.clustername }} - "--clustername={{ .Values.provisioner.clustername }}" +{{- end }} +{{- if .Values.provisioner.fencing }} + - "--enable-fencing={{ .Values.provisioner.fencing }}" {{- end }} - "--setmetadata={{ .Values.provisioner.setmetadata }}" - "--logslowopinterval={{ .Values.logSlowOperationInterval }}" diff --git a/charts/ceph-csi-cephfs/templates/storageclass.yaml b/charts/ceph-csi-cephfs/templates/storageclass.yaml index 35e5c5e43f7..f7912200854 100644 --- a/charts/ceph-csi-cephfs/templates/storageclass.yaml +++ b/charts/ceph-csi-cephfs/templates/storageclass.yaml @@ -49,6 +49,12 @@ parameters: csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Values.storageClass.controllerExpandSecretNamespace }} {{ else }} csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }} +{{- end }} + 
csi.storage.k8s.io/controller-publish-secret-name: {{ .Values.storageClass.controllerPublishSecret }} +{{- if .Values.storageClass.controllerPublishSecretNamespace }} + csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Values.storageClass.controllerPublishSecretNamespace }} +{{ else }} + csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Release.Namespace }} {{- end }} csi.storage.k8s.io/node-stage-secret-name: {{ .Values.storageClass.nodeStageSecret }} {{- if .Values.storageClass.nodeStageSecretNamespace }} diff --git a/charts/ceph-csi-cephfs/values.yaml b/charts/ceph-csi-cephfs/values.yaml index c6086dcaf35..7226353d512 100644 --- a/charts/ceph-csi-cephfs/values.yaml +++ b/charts/ceph-csi-cephfs/values.yaml @@ -163,6 +163,9 @@ nodeplugin: kernelmountoptions: "" fusemountoptions: "" + # enable fencing + fencing: false + provisioner: name: provisioner replicaCount: 3 @@ -244,6 +247,9 @@ provisioner: # set metadata on volume setmetadata: true + # enable fencing + fencing: false + attacher: name: attacher enabled: true @@ -382,6 +388,8 @@ storageClass: provisionerSecretNamespace: "" controllerExpandSecret: csi-cephfs-secret controllerExpandSecretNamespace: "" + controllerPublishSecret: csi-cephfs-secret + controllerPublishSecretNamespace: "" nodeStageSecret: csi-cephfs-secret nodeStageSecretNamespace: "" reclaimPolicy: Delete diff --git a/charts/ceph-csi-rbd/README.md b/charts/ceph-csi-rbd/README.md index 003bd30d699..b4e8eedbee9 100644 --- a/charts/ceph-csi-rbd/README.md +++ b/charts/ceph-csi-rbd/README.md @@ -206,6 +206,8 @@ charts and their default values. | `storageClass.provisionerSecretNamespace` | Specifies the provisioner secret namespace | `""` | | `storageClass.controllerExpandSecret` | Specifies the controller expand secret name | `csi-rbd-secret` | | `storageClass.controllerExpandSecretNamespace` | Specifies the controller expand secret namespace | `""` | +| `storageClass.controllerPublishSecret` | Specifies the controller publish secret name | `csi-rbd-secret` | +| `storageClass.controllerPublishSecretNamespace`| Specifies the controller publish secret namespace | `""` | | `storageClass.nodeStageSecret` | Specifies the node stage secret name | `csi-rbd-secret` | | `storageClass.nodeStageSecretNamespace` | Specifies the node stage secret namespace | `""` | | `storageClass.fstype` | Specify the filesystem type of the volume | `ext4` | diff --git a/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml b/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml index 4f25af3c26d..7e8e70f09c2 100644 --- a/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml +++ b/charts/ceph-csi-rbd/templates/nodeplugin-daemonset.yaml @@ -78,6 +78,9 @@ spec: - "--enable-read-affinity={{ and .Values.readAffinity .Values.readAffinity.enabled | default false }}" {{- if and .Values.readAffinity .Values.readAffinity.enabled }} - "--crush-location-labels={{ .Values.readAffinity.crushLocationLabels | join "," }}" +{{- end }} +{{- if .Values.nodeplugin.fencing }} + - "--enable-fencing={{ .Values.nodeplugin.fencing }}" {{- end }} - "--logslowopinterval={{ .Values.logSlowOperationInterval }}" env: diff --git a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml index f2aea0e2db5..a57c9293bba 100644 --- a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml +++ b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml @@ -105,6 +105,9 @@ spec: {{- if .Values.provisioner.clustername }} - "--clustername={{ 
.Values.provisioner.clustername }}" {{- end }} + {{- if .Values.provisioner.fencing }} + - "--enable-fencing={{ .Values.provisioner.fencing }}" + {{- end }} - "--setmetadata={{ .Values.provisioner.setmetadata }}" - "--logslowopinterval={{ .Values.logSlowOperationInterval }}" env: diff --git a/charts/ceph-csi-rbd/templates/storageclass.yaml b/charts/ceph-csi-rbd/templates/storageclass.yaml index 188362fc050..77881ff173c 100644 --- a/charts/ceph-csi-rbd/templates/storageclass.yaml +++ b/charts/ceph-csi-rbd/templates/storageclass.yaml @@ -77,6 +77,12 @@ parameters: csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Values.storageClass.controllerExpandSecretNamespace }} {{ else }} csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }} +{{- end }} + csi.storage.k8s.io/controller-publish-secret-name: {{ .Values.storageClass.controllerPublishSecret }} +{{- if .Values.storageClass.controllerPublishSecretNamespace }} + csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Values.storageClass.controllerPublishSecretNamespace }} +{{ else }} + csi.storage.k8s.io/controller-publish-secret-namespace: {{ .Release.Namespace }} {{- end }} csi.storage.k8s.io/node-stage-secret-name: {{ .Values.storageClass.nodeStageSecret }} {{- if .Values.storageClass.nodeStageSecretNamespace }} diff --git a/charts/ceph-csi-rbd/values.yaml b/charts/ceph-csi-rbd/values.yaml index 656be50015c..daaf9d6b89b 100644 --- a/charts/ceph-csi-rbd/values.yaml +++ b/charts/ceph-csi-rbd/values.yaml @@ -165,6 +165,9 @@ nodeplugin: podAnnotations: {} + # enable fencing + fencing: false + provisioner: name: provisioner replicaCount: 3 @@ -207,6 +210,9 @@ provisioner: # useful for deployments where the podNetwork has no access to ceph enableHostNetwork: false + # enable fencing + fencing: false + httpMetrics: # Metrics only available for cephcsi/cephcsi => 1.2.0 # Specifies whether http metrics should be exposed @@ -513,6 +519,8 @@ storageClass: provisionerSecretNamespace: "" controllerExpandSecret: csi-rbd-secret controllerExpandSecretNamespace: "" + controllerPublishSecret: csi-rbd-secret + controllerPublishSecretNamespace: "" nodeStageSecret: csi-rbd-secret nodeStageSecretNamespace: "" # Specify the filesystem type of the volume. 
If not specified, diff --git a/scripts/install-helm.sh b/scripts/install-helm.sh index 4d23fa0a329..3e815c6896b 100755 --- a/scripts/install-helm.sh +++ b/scripts/install-helm.sh @@ -19,6 +19,7 @@ DEPLOY_TIMEOUT=600 DEPLOY_SC=0 DEPLOY_SECRET=0 ENABLE_READ_AFFINITY=1 +ENABLE_FENCING=1 # ceph-csi specific variables NODE_LABEL_REGION="test.failure-domain/region" @@ -178,9 +179,13 @@ install_cephcsi_helm_charts() { if [ "${ENABLE_READ_AFFINITY}" -eq 1 ]; then READ_AFFINITY_VALUES="--set readAffinity.enabled=true --set readAffinity.crushLocationLabels={${CRUSH_LOCATION_REGION_LABEL},${CRUSH_LOCATION_ZONE_LABEL}}" fi + # enable fencing + if [ "${ENABLE_FENCING}" -eq 1 ]; then + ENABLE_FENCING_VALUES="--set provisioner.fencing=true --set nodeplugin.fencing=true" + fi # install ceph-csi-cephfs and ceph-csi-rbd charts # shellcheck disable=SC2086 - "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-cephfs", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${CEPHFS_SECRET_TEMPLATE_VALUES} ${CEPHFS_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs ${READ_AFFINITY_VALUES} --set provisioner.snapshotter.args.enableVolumeGroupSnapshots=true + "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-cephfs", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${CEPHFS_SECRET_TEMPLATE_VALUES} ${CEPHFS_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs ${READ_AFFINITY_VALUES} --set provisioner.snapshotter.args.enableVolumeGroupSnapshots=true ${ENABLE_FENCING_VALUES} check_deployment_status app=ceph-csi-cephfs "${NAMESPACE}" check_daemonset_status app=ceph-csi-cephfs "${NAMESPACE}" @@ -191,7 +196,7 @@ install_cephcsi_helm_charts() { kubectl_retry delete cm ceph-csi-encryption-kms-config --namespace "${NAMESPACE}" # shellcheck disable=SC2086 - "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-rbd", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${RBD_SECRET_TEMPLATE_VALUES} ${RBD_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}" --set provisioner.maxSnapshotsOnImage=3 --set provisioner.minSnapshotsOnImage=2 ${READ_AFFINITY_VALUES} --set provisioner.snapshotter.args.enableVolumeGroupSnapshots=true + "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-rbd", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${RBD_SECRET_TEMPLATE_VALUES} ${RBD_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}" --set provisioner.maxSnapshotsOnImage=3 --set provisioner.minSnapshotsOnImage=2 
${READ_AFFINITY_VALUES} --set provisioner.snapshotter.args.enableVolumeGroupSnapshots=true ${ENABLE_FENCING_VALUES}
 
     check_deployment_status app=ceph-csi-rbd "${NAMESPACE}"
     check_daemonset_status app=ceph-csi-rbd "${NAMESPACE}"
 
From 9aa47d771980dd5105713aa6114a560879751467 Mon Sep 17 00:00:00 2001
From: Praveen M
Date: Tue, 19 Aug 2025 16:16:19 +0530
Subject: [PATCH 012/157] deploy: add enable-fencing flag to deployment files

This commit adds the enable-fencing flag to the cephfs & rbd nodeplugin
and provisioner deployment yamls. This is also needed for e2e so it can
deploy the driver with fencing enabled.

Signed-off-by: Praveen M
---
 deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml | 1 +
 deploy/cephfs/kubernetes/csi-cephfsplugin.yaml             | 1 +
 deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml       | 1 +
 deploy/rbd/kubernetes/csi-rbdplugin.yaml                   | 1 +
 examples/cephfs/storageclass.yaml                          | 2 ++
 examples/rbd/storageclass.yaml                             | 2 ++
 6 files changed, 8 insertions(+)

diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
index f679711a7b6..e329ee95bbb 100644
--- a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
+++ b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
@@ -55,6 +55,7 @@ spec:
             - "--pidlimit=-1"
             - "--enableprofiling=false"
             - "--setmetadata=true"
+            # - "--enable-fencing=true"
           env:
             - name: POD_IP
               valueFrom:
diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
index be5e2fec3d7..87edfa005b9 100644
--- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
+++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
@@ -36,6 +36,7 @@ spec:
             - "--v=5"
             - "--drivername=cephfs.csi.ceph.com"
             - "--enableprofiling=false"
+            # - "--enable-fencing=true"
             # If topology based provisioning is desired, configure required
             # node labels representing the nodes topology domain
             # and pass the label names below, for CSI to consume and advertise
diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
index 3a7cb95fc07..416dc43513f 100644
--- a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
+++ b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
@@ -62,6 +62,7 @@ spec:
            - "--rbdsoftmaxclonedepth=4"
            - "--enableprofiling=false"
            - "--setmetadata=true"
+           # - "--enable-fencing=true"
           env:
             - name: POD_IP
               valueFrom:
diff --git a/deploy/rbd/kubernetes/csi-rbdplugin.yaml b/deploy/rbd/kubernetes/csi-rbdplugin.yaml
index 4e096e90acd..407eef3eac0 100644
--- a/deploy/rbd/kubernetes/csi-rbdplugin.yaml
+++ b/deploy/rbd/kubernetes/csi-rbdplugin.yaml
@@ -41,6 +41,7 @@ spec:
             - "--v=5"
             - "--drivername=rbd.csi.ceph.com"
             - "--enableprofiling=false"
+            # - "--enable-fencing=true"
             # If topology based provisioning is desired, configure required
             # node labels representing the nodes topology domain
             # and pass the label names below, for CSI to consume and advertise
diff --git a/examples/cephfs/storageclass.yaml b/examples/cephfs/storageclass.yaml
index fd3aa164994..e9098580d05 100644
--- a/examples/cephfs/storageclass.yaml
+++ b/examples/cephfs/storageclass.yaml
@@ -34,6 +34,8 @@ parameters:
   csi.storage.k8s.io/provisioner-secret-namespace: default
   csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
   csi.storage.k8s.io/controller-expand-secret-namespace: default
+  csi.storage.k8s.io/controller-publish-secret-name: csi-cephfs-secret
+  csi.storage.k8s.io/controller-publish-secret-namespace: default
   csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
   csi.storage.k8s.io/node-stage-secret-namespace: default
 
diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml
index 78843dad9a3..31b1e23b690 100644
--- a/examples/rbd/storageclass.yaml
+++ b/examples/rbd/storageclass.yaml
@@ -86,6 +86,8 @@ parameters:
   csi.storage.k8s.io/provisioner-secret-namespace: default
   csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
   csi.storage.k8s.io/controller-expand-secret-namespace: default
+  csi.storage.k8s.io/controller-publish-secret-name: csi-rbd-secret
+  csi.storage.k8s.io/controller-publish-secret-namespace: default
   csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
   csi.storage.k8s.io/node-stage-secret-namespace: default

From c41a662632a5e4d3eecad4531b52331e3abbec6f Mon Sep 17 00:00:00 2001
From: Praveen M
Date: Thu, 14 Aug 2025 19:32:12 +0530
Subject: [PATCH 013/157] e2e: wait for PV to be deleted instead of PVC

With the Controller PUBLISH_UNPUBLISH capability, the external-attacher
runs a detach operation, causing PV deletion to take longer. For the
static volume test, validate that the PV is deleted before deleting the
backend image/subvolume.

Problem: In the ephemeral volume support test, deleting a pod instantly
deletes the PVC, but PV deletion takes time, leaving backend resources
behind and causing stale resource errors.

Solution: Wait for PV deletion instead of PVC deletion to ensure
backend resources are fully cleaned up.

Signed-off-by: Praveen M
---
 e2e/cephfs.go    |  8 ++++++--
 e2e/pvc.go       | 20 ++++++++++----------
 e2e/rbd.go       |  8 ++++++--
 e2e/staticpvc.go | 10 ++++++++++
 4 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/e2e/cephfs.go b/e2e/cephfs.go
index b07ea70e9fb..1adb37deea6 100644
--- a/e2e/cephfs.go
+++ b/e2e/cephfs.go
@@ -456,7 +456,11 @@ var _ = Describe(cephfsType, func() {
 			}
 			validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
 			validateOmapCount(f, 1, cephfsType, metadataPool, volumesType)
-			pvcName := app.Spec.Volumes[0].Name
+			pvcName := fmt.Sprintf("%s-%s", app.Name, app.Spec.Volumes[0].Name)
+			pvc, err := getPersistentVolumeClaim(c, app.Namespace, pvcName)
+			if err != nil {
+				framework.Failf("failed to get pvc: %v", err)
+			}
 			// delete pod
 			err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 			if err != nil {
@@ -464,7 +468,7 @@ var _ = Describe(cephfsType, func() {
 			}
 
 			// wait for the associated PVC to be deleted
-			err = waitForPVCToBeDeleted(f.ClientSet, app.Namespace, pvcName, deployTimeout)
+			err = waitForPVToBeDeleted(f.ClientSet, pvc.Spec.VolumeName, deployTimeout)
 			if err != nil {
 				logAndFail("failed to wait for PVC deletion: %v", err)
 			}
diff --git a/e2e/pvc.go b/e2e/pvc.go
index 46bf198b33c..d4bf0395725 100644
--- a/e2e/pvc.go
+++ b/e2e/pvc.go
@@ -453,34 +453,34 @@ func getMetricsForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, t i
 	})
 }
 
-func waitForPVCToBeDeleted(c kubernetes.Interface, namespace, pvcName string, t int) error {
+func waitForPVToBeDeleted(c kubernetes.Interface, pvName string, t int) error {
 	timeout := time.Duration(t) * time.Minute
 	ctx := context.TODO()
 	start := time.Now()
 
 	return wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
-		pvc, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
-		// Check that the PVC is really deleted.
+		pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
+		// Check that the PV is really deleted.
framework.Logf( - "waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", - pvcName, - pvc.Status.String(), + "waiting for PV %s in state %s to be deleted (%d seconds elapsed)", + pvName, + pv.Status.String(), int(time.Since(start).Seconds())) if err == nil { - framework.Logf("PVC %s (status: %v) has not been deleted yet, rechecking...", pvcName, pvc.Status) + framework.Logf("PV %s (status: %v) has not been deleted yet, rechecking...", pvName, pv.Status) return false, nil } if isRetryableAPIError(err) { - framework.Logf("failed to verify deletion of PVC %s (status: %v): %v", pvcName, pvc.Status, err) + framework.Logf("failed to verify deletion of PV %s (status: %v): %v", pvName, pv.Status, err) return false, nil } if !apierrs.IsNotFound(err) { - return false, fmt.Errorf("get on deleted PVC %v failed with error other than \"not found\": %w", pvcName, err) + return false, fmt.Errorf("get on deleted PV %v failed with error other than \"not found\": %w", pvName, err) } - // PVC has been successfully deleted + // PV has been successfully deleted return true, nil }) } diff --git a/e2e/rbd.go b/e2e/rbd.go index 129179c489c..37792575bd0 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -907,14 +907,18 @@ var _ = Describe("RBD", func() { // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType) - pvcName := app.Spec.Volumes[0].Name + pvcName := fmt.Sprintf("%s-%s", app.Name, app.Spec.Volumes[0].Name) + pvc, err := getPersistentVolumeClaim(c, app.Namespace, pvcName) + if err != nil { + framework.Failf("failed to get pvc: %v", err) + } err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { logAndFail("failed to delete application: %v", err) } // wait for the associated PVC to be deleted - err = waitForPVCToBeDeleted(f.ClientSet, app.Namespace, pvcName, deployTimeout) + err = waitForPVToBeDeleted(f.ClientSet, pvc.Spec.VolumeName, deployTimeout) if err != nil { logAndFail("failed to wait for PVC deletion: %v", err) } diff --git a/e2e/staticpvc.go b/e2e/staticpvc.go index bed9bd3abea..4bbd707ba01 100644 --- a/e2e/staticpvc.go +++ b/e2e/staticpvc.go @@ -226,6 +226,11 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI return fmt.Errorf("failed to delete pv: %w", err) } + err = waitForPVToBeDeleted(f.ClientSet, pv.Name, deployTimeout) + if err != nil { + return fmt.Errorf("failed to validate PV %s is deleted: %w", pv.Name, err) + } + cmd = fmt.Sprintf("rbd rm %s %s", rbdImageName, rbdOptions(defaultRBDPool)) _, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace) @@ -461,6 +466,11 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath, fsName stri return fmt.Errorf("failed to delete pv: %w", err) } + err = waitForPVToBeDeleted(f.ClientSet, pv.Name, deployTimeout) + if err != nil { + return fmt.Errorf("failed to validate PV %s is deleted: %w", pv.Name, err) + } + err = c.CoreV1().Secrets(cephCSINamespace).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("failed to delete secret: %w", err) From bcdae899f6e3caf3af63d86b31f632bb26b026c6 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Tue, 19 Aug 2025 16:15:54 +0530 Subject: [PATCH 014/157] e2e: test to verify the client address metadata exists This commit adds e2e tests to verify if the client address metadata exists in cephfs subvolume and rbd image. 
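As a rough sketch (volumeHandle and nodeName are placeholders here, not
values from this change), the keys these tests look up have the form:

    // mirrors the Sprintf calls in the e2e helpers added below
    rbdKey := fmt.Sprintf(".rbd.csi.ceph.com/clientaddress/%s/%s", volumeHandle, nodeName)
    cephfsKey := fmt.Sprintf(".cephfs.csi.ceph.com/clientaddress/%s/%s", volumeHandle, nodeName)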
Signed-off-by: Praveen M --- e2e/cephfs.go | 52 ++++++++++++++++-- e2e/cephfs_helper.go | 73 +++++++++++++++++++++++++ e2e/deployment.go | 7 +++ e2e/pod.go | 126 +++++++++++++++++++++++++++++++++++++++++++ e2e/rbd.go | 19 +++++-- e2e/rbd_helper.go | 3 ++ e2e/utils.go | 4 ++ 7 files changed, 276 insertions(+), 8 deletions(-) diff --git a/e2e/cephfs.go b/e2e/cephfs.go index 1adb37deea6..c06c8f3369e 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -99,9 +99,10 @@ func createORDeleteCephfsResources(action kubectlAction) { }, // the provisioner itself &yamlResourceNamespaced{ - filename: cephFSDirPath + cephFSProvisioner, - namespace: cephCSINamespace, - oneReplica: true, + filename: cephFSDirPath + cephFSProvisioner, + namespace: cephCSINamespace, + oneReplica: true, + enableFencing: true, }, // dependencies for the node-plugin &yamlResourceNamespaced{ @@ -110,8 +111,9 @@ func createORDeleteCephfsResources(action kubectlAction) { }, // the node-plugin itself &yamlResourceNamespaced{ - filename: cephFSDirPath + cephFSNodePlugin, - namespace: cephCSINamespace, + filename: cephFSDirPath + cephFSNodePlugin, + namespace: cephCSINamespace, + enableFencing: true, }, } @@ -361,6 +363,26 @@ var _ = Describe(cephfsType, func() { }) } + By("verify client address metadata exists", func() { + err := createCephfsStorageClass(f.ClientSet, f, true, nil) + if err != nil { + framework.Failf("failed to create CephFS storageclass: %v", err) + } + + err = verifyClientAddressMetadataExists(f, pvcPath, appPath, cephfsType) + if err != nil { + framework.Failf("failed to verify client address metadata exists: %v", err) + } + + validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) + validateOmapCount(f, 0, cephfsType, metadataPool, volumesType) + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + }) + By("verify PVC and App Binding with volumeBindingMode:WaitForFirstConsumer", func() { err := createCephfsStorageClassWaitForFirstConsumer(f.ClientSet, f, true, nil) if err != nil { @@ -1936,6 +1958,26 @@ var _ = Describe(cephfsType, func() { logAndFail("failed to create PVC and app: %v", err) } + // Verify client address metadata of snapshot-backed PVC. + appClone, err = getPod(f.ClientSet, appClone.Namespace, appClone.Name) + if err != nil { + logAndFail("failed to get app pod: %v", err) + } + backingSubvolumeNameData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + logAndFail("failed to get backing subvolume name from PVC: %v", err) + } + backingSnapshotName, err := getSnapName(snap.Namespace, snap.Name) + if err != nil { + logAndFail("failed to get backing snapshot name: %v", err) + } + err = verifyClientAddressMetadataSnapshotBacked( + f, pvcClone, appClone, backingSubvolumeNameData.imageName, backingSnapshotName, + ) + if err != nil { + logAndFail("failed to verify client address metadata of snapshot-backed PVC: %v", err) + } + // Snapshot-backed volume shouldn't contribute to total subvolume count. 
validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup) diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go index 2a24dd0b7e0..17eac7365f5 100644 --- a/e2e/cephfs_helper.go +++ b/e2e/cephfs_helper.go @@ -109,6 +109,9 @@ func updateStorageClassParameters(sc *scv1.StorageClass, params map[string]strin sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = cephCSINamespace sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = cephFSProvisionerSecretName + sc.Parameters["csi.storage.k8s.io/controller-publish-secret-namespace"] = cephCSINamespace + sc.Parameters["csi.storage.k8s.io/controller-publish-secret-name"] = cephFSProvisionerSecretName + sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = cephCSINamespace sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = cephFSNodePluginSecretName @@ -294,6 +297,27 @@ func listCephFSSubvolumeMetadata( return metadata, nil } +func getCephFSSubvolumeMetadata( + f *framework.Framework, + filesystem, + subvolume, + groupname, + key string, +) (string, error) { + stdout, stdErr, err := execCommandInToolBoxPod( + f, + fmt.Sprintf("ceph fs subvolume metadata get %s %s --group_name=%s %s", filesystem, subvolume, groupname, key), + rookNamespace) + if err != nil { + return "", err + } + if stdErr != "" { + return "", fmt.Errorf("%s", stdErr) + } + + return strings.TrimSpace(stdout), nil +} + type cephfsSnapshotMetadata struct { VolSnapNameKey string `json:"csi.storage.k8s.io/volumesnapshot/name"` VolSnapNamespaceKey string `json:"csi.storage.k8s.io/volumesnapshot/namespace"` @@ -329,6 +353,28 @@ func listCephFSSnapshotMetadata( return metadata, nil } +func getCephFSSnapshotMetadata( + f *framework.Framework, + filesystem, + subvolume, + snapshot, + groupname, + key string, +) (string, error) { + stdout, stdErr, err := execCommandInToolBoxPod( + f, + fmt.Sprintf("ceph fs subvolume snapshot metadata get %s %s %s %s --group_name=%s", filesystem, subvolume, snapshot, key, groupname), + rookNamespace) + if err != nil { + return "", err + } + if stdErr != "" { + return "", fmt.Errorf("%s", stdErr) + } + + return strings.TrimSpace(stdout), nil +} + type cephfsSnapshot struct { Name string `json:"name"` } @@ -617,3 +663,30 @@ func validateFscryptClone( } } } + +func verifyClientAddressMetadataSnapshotBacked( + f *framework.Framework, + pvc *v1.PersistentVolumeClaim, + pod *v1.Pod, + subVolumeName, snapshotName string, +) error { + nodeId := pod.Spec.NodeName + _, pvObject, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + if err != nil { + return fmt.Errorf("failed to get PVC and PV: %w", err) + } + volumeHandle := pvObject.Spec.CSI.VolumeHandle + + metadataKey := fmt.Sprintf(".cephfs.csi.ceph.com/clientaddress/%s/%s", volumeHandle, nodeId) + metadataValue, err := getCephFSSnapshotMetadata( + f, fileSystemName, subVolumeName, snapshotName, subvolumegroup, metadataKey) + if err != nil { + return fmt.Errorf("failed to get subvolume snapshot metadata %s: %w", metadataKey, err) + } + + if metadataValue == "" { + return fmt.Errorf("client address metadata %s value is empty", metadataKey) + } + + return nil +} diff --git a/e2e/deployment.go b/e2e/deployment.go index bce18053874..753b6733954 100644 --- a/e2e/deployment.go +++ b/e2e/deployment.go @@ -242,6 +242,9 @@ type yamlResourceNamespaced struct { // enable read affinity support (for RBD) enableReadAffinity bool + + // enable fencing + enableFencing bool } func (yrn *yamlResourceNamespaced) Do(action kubectlAction) error { @@ -267,6 +270,10 @@ func 
(yrn *yamlResourceNamespaced) Do(action kubectlAction) error { data = enableReadAffinityInTemplate(data) } + if yrn.enableFencing { + data = enableFencingInTemplate(data) + } + if yrn.crushLocationLabels != "" { data = addCrsuhLocationLabels(data, yrn.crushLocationLabels) } diff --git a/e2e/pod.go b/e2e/pod.go index efe7c8540f0..a08f4e0b64a 100644 --- a/e2e/pod.go +++ b/e2e/pod.go @@ -406,6 +406,15 @@ func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int, ex }) } +func getPod(c kubernetes.Interface, namespace, name string) (*v1.Pod, error) { + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get app: %w", err) + } + + return pod, nil +} + func deletePod(name, ns string, c kubernetes.Interface, t int) error { timeout := time.Duration(t) * time.Minute ctx := context.TODO() @@ -738,3 +747,120 @@ func verifyReadAffinity( return nil } + +func verifyClientAddressMetadataExists( + f *framework.Framework, + pvcPath, appPath string, + driverType string, +) error { + // create PVC + pvc, err := loadPVC(pvcPath) + if err != nil { + return fmt.Errorf("failed to load pvc: %w", err) + } + pvc.Namespace = f.UniqueName + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + return fmt.Errorf("failed to create PVC: %w", err) + } + + app, err := loadApp(appPath) + if err != nil { + return fmt.Errorf("failed to load application: %w", err) + } + app.Namespace = f.UniqueName + err = createApp(f.ClientSet, app, deployTimeout) + if err != nil { + return fmt.Errorf("failed to create application: %w", err) + } + + pod, err := getPod(f.ClientSet, app.Namespace, app.Name) + if err != nil { + return fmt.Errorf("failed to get pod: %w", err) + } + nodeId := pod.Spec.NodeName + + var ( + metadataKey string + metadataValue string + rbdImageSpec string + subVolumeName string + ) + + _, pvObject, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + if err != nil { + framework.Logf("error getting pvc %q in namespace %q: %v", pvc.Name, pvc.Namespace, err) + return fmt.Errorf("failed to get PVC and PV: %w", err) + } + volumeHandle := pvObject.Spec.CSI.VolumeHandle + + // First, get the metadata while the pod is running to verify it exists + switch driverType { + case rbdType: + metadataKey = fmt.Sprintf(".rbd.csi.ceph.com/clientaddress/%s/%s", volumeHandle, nodeId) + + imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err + } + + rbdImageSpec = imageSpec(defaultRBDPool, imageData.imageName) + metadataValue, err = getImageMeta(rbdImageSpec, metadataKey, f) + if err != nil { + return fmt.Errorf("failed to get image metadata %s: %w", metadataKey, err) + } + case cephfsType: + metadataKey = fmt.Sprintf(".cephfs.csi.ceph.com/clientaddress/%s/%s", volumeHandle, nodeId) + subVolumeName = pvObject.Spec.CSI.VolumeAttributes["subvolumeName"] + metadataValue, err = getCephFSSubvolumeMetadata( + f, fileSystemName, subVolumeName, subvolumegroup, metadataKey) + if err != nil { + return fmt.Errorf("failed to get subvolume metadata %s: %w", metadataKey, err) + } + } + + if metadataValue == "" { + return fmt.Errorf("client address metadata %s value is empty", metadataKey) + } + + err = deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout) + if err != nil { + return fmt.Errorf("failed to delete pod: %w", err) + } + + // verify clientaddress metadata is removed after pod deletion + // retry up to 3 times with 10-second intervals + maxRetries := 3 + 
retryInterval := 10 * time.Second + + for attempt := 1; attempt <= maxRetries; attempt++ { + switch driverType { + case rbdType: + metadataValue, err = getImageMeta(rbdImageSpec, metadataKey, f) + case cephfsType: + metadataValue, err = getCephFSSubvolumeMetadata( + f, fileSystemName, subVolumeName, subvolumegroup, metadataKey) + } + + if err != nil && strings.Contains(err.Error(), "command terminated with exit code 2") { + framework.Logf("clientaddress metadata %s successfully removed", metadataKey) + break + } + + framework.Logf("clientaddress metadata %s still exists with value %s, retrying (%d/%d)", + metadataKey, metadataValue, attempt, maxRetries) + + if attempt == maxRetries { + return fmt.Errorf("clientaddress metadata %s still exists with value %s (after %d attempts)", + metadataKey, metadataValue, maxRetries) + } + time.Sleep(retryInterval) + } + + err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + return fmt.Errorf("failed to delete PVC: %w", err) + } + + return nil +} diff --git a/e2e/rbd.go b/e2e/rbd.go index 37792575bd0..34243edd106 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -185,9 +185,10 @@ func createORDeleteRbdResources(action kubectlAction) { }, // the provisioner itself &yamlResourceNamespaced{ - filename: rbdDirPath + rbdProvisioner, - namespace: cephCSINamespace, - oneReplica: true, + filename: rbdDirPath + rbdProvisioner, + namespace: cephCSINamespace, + oneReplica: true, + enableFencing: true, }, // dependencies for the node-plugin &yamlResourceNamespaced{ @@ -201,6 +202,7 @@ func createORDeleteRbdResources(action kubectlAction) { domainLabel: nodeRegionLabel + "," + nodeZoneLabel, enableReadAffinity: true, crushLocationLabels: crushLocationRegionLabel + "," + crushLocationZoneLabel, + enableFencing: true, }, } @@ -530,6 +532,17 @@ var _ = Describe("RBD", func() { }) } + By("verify client address metadata exists", func() { + err := verifyClientAddressMetadataExists(f, pvcPath, appPath, rbdType) + if err != nil { + framework.Failf("failed to verify client address metadata exists: %v", err) + } + + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) + }) + By("verify readAffinity support", func() { err := verifyReadAffinity(f, pvcPath, appPath, rbdDeployment.getDaemonsetName(), rbdContainerName, cephCSINamespace) diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index 17861efdcce..13ee64f036c 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -139,6 +139,9 @@ func createRBDStorageClass( sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = cephCSINamespace sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = rbdProvisionerSecretName + sc.Parameters["csi.storage.k8s.io/controller-publish-secret-namespace"] = cephCSINamespace + sc.Parameters["csi.storage.k8s.io/controller-publish-secret-name"] = rbdProvisionerSecretName + sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = cephCSINamespace sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = rbdNodePluginSecretName diff --git a/e2e/utils.go b/e2e/utils.go index 77f7a4bfffd..727d25c312d 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -935,6 +935,10 @@ func enableReadAffinityInTemplate(template string) string { return strings.ReplaceAll(template, "# - \"--enable-read-affinity=true\"", "- \"--enable-read-affinity=true\"") } +func enableFencingInTemplate(template string) string { + return strings.ReplaceAll(template, "# - 
\"--enable-fencing=true\"", "- \"--enable-fencing=true\"") +} + func addCrsuhLocationLabels(template, labels string) string { return strings.ReplaceAll(template, "# - \"--crush-location-labels=topology.io/zone,topology.io/rack\"", "- \"--crush-location-labels="+labels+"\"") From ef48973007a3a650224f9797a418c0799216b688 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 06:21:23 +0000 Subject: [PATCH 015/157] rebase: bump github.com/ceph/go-ceph from 0.34.0 to 0.35.0 Bumps [github.com/ceph/go-ceph](https://github.com/ceph/go-ceph) from 0.34.0 to 0.35.0. - [Release notes](https://github.com/ceph/go-ceph/releases) - [Changelog](https://github.com/ceph/go-ceph/blob/master/docs/release-process.md) - [Commits](https://github.com/ceph/go-ceph/compare/v0.34.0...v0.35.0) --- updated-dependencies: - dependency-name: github.com/ceph/go-ceph dependency-version: 0.35.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- .../github.com/ceph/go-ceph/cephfs/command.go | 10 ++++ .../github.com/ceph/go-ceph/cephfs/file_fd.go | 17 +++++++ .../ceph/go-ceph/cephfs/file_futime.go | 46 ++++++++++++++++++ .../ceph/go-ceph/cephfs/fscompat.go | 2 - .../github.com/ceph/go-ceph/rados/command.go | 48 +++++++++++++++++-- .../ceph/go-ceph/rbd/diff_iterate_by_id.go | 2 - .../ceph/go-ceph/rbd/encryption_opt_luks.go | 2 +- vendor/modules.txt | 2 +- 10 files changed, 122 insertions(+), 13 deletions(-) create mode 100644 vendor/github.com/ceph/go-ceph/cephfs/file_fd.go create mode 100644 vendor/github.com/ceph/go-ceph/cephfs/file_futime.go diff --git a/go.mod b/go.mod index 057c15d6a9d..555b56516d2 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423 - github.com/ceph/go-ceph v0.34.0 + github.com/ceph/go-ceph v0.35.0 github.com/container-storage-interface/spec v1.11.0 github.com/csi-addons/kubernetes-csi-addons v0.13.0 github.com/csi-addons/spec v0.2.1-0.20250610152019-b5a7205f6a79 diff --git a/go.sum b/go.sum index 900f0c9eb6e..d97d6b1f6d2 100644 --- a/go.sum +++ b/go.sum @@ -134,8 +134,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423 h1:Q7lE8B6IaLc3j7Af4uKS4Lgpc500g6IwIzpIVzzEbJM= github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423/go.mod h1:P+rCBJEUy2Yt6vOcTh7pyh9/g+o9pOZc7kyY6k/PS5U= -github.com/ceph/go-ceph v0.34.0 h1:C45yU8VRl0Rg+/I0qw5bzT337HG6DL0yBQ0VR6QHv4o= -github.com/ceph/go-ceph v0.34.0/go.mod h1:otRLwpVgM81lK5zdGYOfr4OELdeS97luDBE/PjXAB5o= +github.com/ceph/go-ceph v0.35.0 h1:wcDUbsjeNJ7OfbWCE7I5prqUL794uXchopw3IvrGQkk= +github.com/ceph/go-ceph v0.35.0/go.mod h1:ILF8WKhQQ2p2YuX1oWigkmsfT39U8T/HS2NrqxExq2s= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= diff --git a/vendor/github.com/ceph/go-ceph/cephfs/command.go b/vendor/github.com/ceph/go-ceph/cephfs/command.go index 709566a5300..962a4271406 
100644 --- a/vendor/github.com/ceph/go-ceph/cephfs/command.go +++ b/vendor/github.com/ceph/go-ceph/cephfs/command.go @@ -19,12 +19,22 @@ func cephBufferFree(p unsafe.Pointer) { } // MdsCommand sends commands to the specified MDS. +// +// The args parameter takes a slice of byte slices but typically a single +// slice element is sufficient. The use of two slices exists to best match +// the structure of the underlying C call which is often a legacy interface +// in Ceph. func (mount *MountInfo) MdsCommand(mdsSpec string, args [][]byte) ([]byte, string, error) { return mount.mdsCommand(mdsSpec, args, nil) } // MdsCommandWithInputBuffer sends commands to the specified MDS, with an input // buffer. +// +// The args parameter takes a slice of byte slices but typically a single +// slice element is sufficient. The use of two slices exists to best match +// the structure of the underlying C call which is often a legacy interface +// in Ceph. func (mount *MountInfo) MdsCommandWithInputBuffer(mdsSpec string, args [][]byte, inputBuffer []byte) ([]byte, string, error) { return mount.mdsCommand(mdsSpec, args, inputBuffer) } diff --git a/vendor/github.com/ceph/go-ceph/cephfs/file_fd.go b/vendor/github.com/ceph/go-ceph/cephfs/file_fd.go new file mode 100644 index 00000000000..1c8b44037bf --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/cephfs/file_fd.go @@ -0,0 +1,17 @@ +//go:build ceph_preview + +package cephfs + +// Fd returns the integer open file descriptor in cephfs. +// NOTE: It doesn't make sense to consume the returned integer fd anywhere +// outside CephFS and is recommended not to do so given the undefined behaviour. +// Also, as seen with the Go standard library, the fd is only valid as long as +// the corresponding File object is intact in the sense that an fd from a closed +// File object is invalid. +func (f *File) Fd() int { + if f == nil || f.mount == nil { + return -1 + } + + return int(f.fd) +} diff --git a/vendor/github.com/ceph/go-ceph/cephfs/file_futime.go b/vendor/github.com/ceph/go-ceph/cephfs/file_futime.go new file mode 100644 index 00000000000..31382f56bd1 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/cephfs/file_futime.go @@ -0,0 +1,46 @@ +//go:build ceph_preview + +package cephfs + +// Futime changes file/directory last access and modification times. +// +// Implements: +// +// int ceph_futime(struct ceph_mount_info *cmount, int fd, struct utimbuf *buf); +func (f *File) Futime(times *Utime) error { + if err := f.validate(); err != nil { + return err + } + + return f.mount.Futime(int(f.fd), times) +} + +// Futimens changes file/directory last access and modification times, here times param +// is an array of Timespec struct having length 2, where times[0] represents the access time +// and times[1] represents the modification time. +// +// Implements: +// +// int ceph_futimens(struct ceph_mount_info *cmount, int fd, struct timespec times[2]); +func (f *File) Futimens(times []Timespec) error { + if err := f.validate(); err != nil { + return err + } + + return f.mount.Futimens(int(f.fd), times) +} + +// Futimes changes file/directory last access and modification times, here times param +// is an array of Timeval struct type having length 2, where times[0] represents the access time +// and times[1] represents the modification time. 
+// +// Implements: +// +// int ceph_futimes(struct ceph_mount_info *cmount, int fd, struct timeval times[2]); +func (f *File) Futimes(times []Timeval) error { + if err := f.validate(); err != nil { + return err + } + + return f.mount.Futimes(int(f.fd), times) +} diff --git a/vendor/github.com/ceph/go-ceph/cephfs/fscompat.go b/vendor/github.com/ceph/go-ceph/cephfs/fscompat.go index 84b33906a01..b621c2fd1d7 100644 --- a/vendor/github.com/ceph/go-ceph/cephfs/fscompat.go +++ b/vendor/github.com/ceph/go-ceph/cephfs/fscompat.go @@ -1,5 +1,3 @@ -//go:build ceph_preview - package cephfs import ( diff --git a/vendor/github.com/ceph/go-ceph/rados/command.go b/vendor/github.com/ceph/go-ceph/rados/command.go index 3694abf573c..8ca6d6b9cea 100644 --- a/vendor/github.com/ceph/go-ceph/rados/command.go +++ b/vendor/github.com/ceph/go-ceph/rados/command.go @@ -43,6 +43,11 @@ func (c *Conn) MonCommandWithInputBuffer(args, inputBuffer []byte) ([]byte, stri // PGCommand sends a command to one of the PGs // +// The args parameter takes a slice of byte slices but typically a single +// slice element is sufficient. The use of two slices exists to best match +// the structure of the underlying C call which is often a legacy interface +// in Ceph. +// // Implements: // // int rados_pg_command(rados_t cluster, const char *pgstr, @@ -56,6 +61,11 @@ func (c *Conn) PGCommand(pgid []byte, args [][]byte) ([]byte, string, error) { // PGCommandWithInputBuffer sends a command to one of the PGs, with an input buffer // +// The args parameter takes a slice of byte slices but typically a single +// slice element is sufficient. The use of two slices exists to best match +// the structure of the underlying C call which is often a legacy interface +// in Ceph. +// // Implements: // // int rados_pg_command(rados_t cluster, const char *pgstr, @@ -87,19 +97,29 @@ func (c *Conn) PGCommandWithInputBuffer(pgid []byte, args [][]byte, inputBuffer } // MgrCommand sends a command to a ceph-mgr. +// +// The args parameter takes a slice of byte slices but typically a single +// slice element is sufficient. The use of two slices exists to best match +// the structure of the underlying C call which is often a legacy interface +// in Ceph. func (c *Conn) MgrCommand(args [][]byte) ([]byte, string, error) { return c.MgrCommandWithInputBuffer(args, nil) } // MgrCommandWithInputBuffer sends a command, with an input buffer, to a ceph-mgr. // +// The args parameter takes a slice of byte slices but typically a single +// slice element is sufficient. The use of two slices exists to best match +// the structure of the underlying C call which is often a legacy interface +// in Ceph. +// // Implements: // // int rados_mgr_command(rados_t cluster, const char **cmd, -// size_t cmdlen, const char *inbuf, -// size_t inbuflen, char **outbuf, -// size_t *outbuflen, char **outs, -// size_t *outslen); +// size_t cmdlen, const char *inbuf, +// size_t inbuflen, char **outbuf, +// size_t *outbuflen, char **outs, +// size_t *outslen); func (c *Conn) MgrCommandWithInputBuffer(args [][]byte, inputBuffer []byte) ([]byte, string, error) { ci := cutil.NewCommandInput(args, inputBuffer) defer ci.Free() @@ -121,6 +141,11 @@ func (c *Conn) MgrCommandWithInputBuffer(args [][]byte, inputBuffer []byte) ([]b } // OsdCommand sends a command to the specified ceph OSD. +// +// The args parameter takes a slice of byte slices but typically a single +// slice element is sufficient. 
The use of two slices exists to best match
+// the structure of the underlying C call which is often a legacy interface
+// in Ceph.
 func (c *Conn) OsdCommand(osd int, args [][]byte) ([]byte, string, error) {
 	return c.OsdCommandWithInputBuffer(osd, args, nil)
 }
@@ -128,6 +153,11 @@ func (c *Conn) OsdCommand(osd int, args [][]byte) ([]byte, string, error) {
 // OsdCommandWithInputBuffer sends a command, with an input buffer, to the
 // specified ceph OSD.
 //
+// The args parameter takes a slice of byte slices but typically a single
+// slice element is sufficient. The use of two slices exists to best match
+// the structure of the underlying C call which is often a legacy interface
+// in Ceph.
+//
 // Implements:
 //
 //	int rados_osd_command(rados_t cluster, int osdid,
@@ -159,12 +189,22 @@ func (c *Conn) OsdCommandWithInputBuffer(
 }
 
 // MonCommandTarget sends a command to a specified monitor.
+//
+// The args parameter takes a slice of byte slices but typically a single
+// slice element is sufficient. The use of two slices exists to best match
+// the structure of the underlying C call which is often a legacy interface
+// in Ceph.
 func (c *Conn) MonCommandTarget(name string, args [][]byte) ([]byte, string, error) {
 	return c.MonCommandTargetWithInputBuffer(name, args, nil)
 }
 
 // MonCommandTargetWithInputBuffer sends a command, with an input buffer, to a specified monitor.
 //
+// The args parameter takes a slice of byte slices but typically a single
+// slice element is sufficient. The use of two slices exists to best match
+// the structure of the underlying C call which is often a legacy interface
+// in Ceph.
+//
 // Implements:
 //
 //	int rados_mon_command_target(rados_t cluster, const char *name,
diff --git a/vendor/github.com/ceph/go-ceph/rbd/diff_iterate_by_id.go b/vendor/github.com/ceph/go-ceph/rbd/diff_iterate_by_id.go
index 5edb562e883..69f55e95141 100644
--- a/vendor/github.com/ceph/go-ceph/rbd/diff_iterate_by_id.go
+++ b/vendor/github.com/ceph/go-ceph/rbd/diff_iterate_by_id.go
@@ -1,5 +1,3 @@
-//go:build ceph_preview
-
 package rbd
 
 /*
diff --git a/vendor/github.com/ceph/go-ceph/rbd/encryption_opt_luks.go b/vendor/github.com/ceph/go-ceph/rbd/encryption_opt_luks.go
index 363b780cbc0..1c65f279b2c 100644
--- a/vendor/github.com/ceph/go-ceph/rbd/encryption_opt_luks.go
+++ b/vendor/github.com/ceph/go-ceph/rbd/encryption_opt_luks.go
@@ -1,4 +1,4 @@
-//go:build !octopus && !pacific && !quincy && ceph_preview
+//go:build !octopus && !pacific && !quincy
 
 package rbd
 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0f6cceb0d39..c366991f838 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -199,7 +199,7 @@ github.com/ceph/ceph-csi/api/deploy/ocp
 # github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423
 ## explicit; go 1.24.4
 github.com/ceph/ceph-nvmeof/lib/go/nvmeof
-# github.com/ceph/go-ceph v0.34.0
+# github.com/ceph/go-ceph v0.35.0
 ## explicit; go 1.23.0
 github.com/ceph/go-ceph/cephfs
 github.com/ceph/go-ceph/cephfs/admin

From 075487210acac628a928057c2f43f1893c03c45e Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Tue, 19 Aug 2025 15:51:34 +0200
Subject: [PATCH 016/157] nvmeof: prevent concurrent requests for the same volume

On occasion the container orchestrator (Kubernetes) will retry CSI
operations that have timed out. In certain failure scenarios, these
operations are actually still active, and just blocked (or very slow)
at the Ceph cluster.
A second try of the same operation should not be attempted, as it may
cause conflicts and will likely result in the slow/hanging situation
again. By protecting concurrent operations for the same volume and
reporting a failure, users can at least see that something is
malfunctioning.

Signed-off-by: Niels de Vos
---
 .../nvmeof/controller/controllerserver.go     | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
index dbeb1e4c02b..98588f21664 100644
--- a/internal/nvmeof/controller/controllerserver.go
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -31,12 +31,17 @@ import (
 	"github.com/ceph/ceph-csi/internal/rbd"
 	rbdutil "github.com/ceph/ceph-csi/internal/rbd"
 	rbddriver "github.com/ceph/ceph-csi/internal/rbd/driver"
+	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"
 )
 
 type Server struct {
 	csi.UnimplementedControllerServer
 
+	// A map storing all volumes with ongoing operations so that additional operations
+	// for that same volume (as defined by VolumeID/volume name) return an Aborted error
+	volumeLocks *util.VolumeLocks
+
 	// backendServer handles the RBD requests
 	backendServer *rbd.ControllerServer
 }
@@ -44,6 +49,7 @@ type Server struct {
 // NewControllerServer initialize a controller server for nvmeof CSI driver.
 func NewControllerServer(d *csicommon.CSIDriver) (*Server, error) {
 	return &Server{
+		volumeLocks:   util.NewVolumeLocks(),
 		backendServer: rbddriver.NewControllerServer(d),
 	}, nil
 }
@@ -80,6 +86,14 @@ func (cs *Server) CreateVolume(
 		return nil, status.Errorf(codes.InvalidArgument, "request validation failed: %v", err)
 	}
 
+	// prevent concurrent requests for the same volume
+	if acquired := cs.volumeLocks.TryAcquire(req.GetName()); !acquired {
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, req.GetName())
+
+		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetName())
+	}
+	defer cs.volumeLocks.Release(req.GetName())
+
 	// Step 1: Create RBD volume through backend. if exists, it is ok.
 	res, err := cs.backendServer.CreateVolume(ctx, req)
 	if err != nil {
@@ -122,6 +136,15 @@ func (cs *Server) DeleteVolume(
 	if volumeID == "" {
 		return nil, status.Errorf(codes.InvalidArgument, "empty volume ID in request")
 	}
+
+	// prevent concurrent requests for the same volume
+	if acquired := cs.volumeLocks.TryAcquire(volumeID); !acquired {
+		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)
+
+		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
+	}
+	defer cs.volumeLocks.Release(volumeID)
+
 	// Get NVMe-oF metadata for cleanup
 	nvmeofData, err := cs.getNVMeoFMetadata(ctx, req, volumeID)
 	if err != nil {

From a586af5284f7a73eef9c905ce93cba2a95418f17 Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Thu, 21 Aug 2025 14:17:26 +0200
Subject: [PATCH 017/157] ci: prevent running multiple e2e suites at the same time

Mergify started to batch PRs into a "merge PR". This can cause multiple
e2e suites to run at the same time. As there is a limit on the number
of machines where we run our tests, many jobs get into a waiting state
for reserving a machine.

When different PRs run the e2e suite at the same time, only one result
will be used for merging that particular PR. The results of other PRs
are thrown away, as they need to be rebased and re-tested.

By preventing batching of PRs, there should be less "CI waste", and PRs
can get tested a little more efficiently.
Signed-off-by: Niels de Vos
---
 .mergify.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.mergify.yml b/.mergify.yml
index 83c907ed907..86081e38b5c 100644
--- a/.mergify.yml
+++ b/.mergify.yml
@@ -79,6 +79,10 @@ queue_rules:
       - "status-success=ci/centos/jjb-validate"
     merge_method: rebase
     update_method: rebase
+    batch_size: 1
+
+merge_queue:
+  max_parallel_checks: 1

 pull_request_rules:
   - name: start CI jobs for PRs in the merge queue

From 0fb3efbd7349ebeea751413eda056b321789e5ba Mon Sep 17 00:00:00 2001
From: Praveen M
Date: Thu, 21 Aug 2025 12:18:22 +0530
Subject: [PATCH 018/157] rbd: set/unset userIdMapping metadata

This commit does the following operations:
1. In NodeStageVolume(): sets the nodeId:userId mapping in image metadata.
   The userId is the ceph user used for mapping the image.
2. In ControllerUnpublishVolume(): removes the nodeId:userId mapping from
   the image metadata.

Signed-off-by: Praveen M
---
 internal/rbd/controllerserver.go | 37 ++++++++++++++++++++++++++++++++
 internal/rbd/driver/driver.go    |  2 +-
 internal/rbd/nodeserver.go       | 31 ++++++++++++++++++++++++++
 internal/rbd/rbd_util.go         |  9 ++++++++
 4 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go
index ffb084a6320..00bf5cfa824 100644
--- a/internal/rbd/controllerserver.go
+++ b/internal/rbd/controllerserver.go
@@ -1801,6 +1801,11 @@ func (cs *ControllerServer) ControllerUnpublishVolume(
 	}
 	defer rv.Destroy(ctx)

+	err = cs.removeUserIdMapping(ctx, req.GetNodeId(), rv)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to remove user ID mapping for node %s: %v", req.GetNodeId(), err)
+	}
+
 	err = cs.fenceNode(ctx, req.GetNodeId(), rv, credentials)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "failed to fence node %s: %v", req.GetNodeId(), err)
@@ -1809,6 +1814,38 @@ func (cs *ControllerServer) ControllerUnpublishVolume(
 	return &csi.ControllerUnpublishVolumeResponse{}, nil
 }

+// removeUserIdMapping attempts to remove the nodeId:userId mapping from the metadata of the RBD volume.
+//
+// Parameters:
+// - nodeId: The ID of the node that we want to remove the user ID mapping for.
+// - rbdVolume: The rbdVolume object representing the RBD image.
+//
+// Behavior:
+// - If the '--setmetadata' flag is set to false in CSI driver configuration, this function does nothing.
+// - Removes the user ID mapping metadata for the specified nodeId from the RBD image.
+func (cs *ControllerServer) removeUserIdMapping(
+	ctx context.Context,
+	nodeId string,
+	rv *rbdVolume,
+) error {
+	if !cs.SetMetadata {
+		return nil
+	}
+	if nodeId == "" {
+		return errors.New("nodeId cannot be empty")
+	}
+
+	metadataKey := getUserIDMappingKey(rv.VolID, nodeId)
+	err := rv.RemoveMetadata(metadataKey)
+	if err != nil && !errors.Is(err, librbd.ErrNotExist) {
+		return fmt.Errorf("failed to remove user ID mapping for node %s on image %s: %w", nodeId, rv, err)
+	}
+
+	log.DebugLog(ctx, "successfully removed user ID mapping for nodeId %s", nodeId)
+
+	return nil
+}
+
+// fenceNode attempts to fence a client node from accessing the RBD volume.
//
// Parameters:
diff --git a/internal/rbd/driver/driver.go b/internal/rbd/driver/driver.go
index cb9c1330878..a01fd7f9861 100644
--- a/internal/rbd/driver/driver.go
+++ b/internal/rbd/driver/driver.go
@@ -163,7 +163,7 @@ func (r *Driver) Run(conf *util.Config) {
 		log.FatalLogMsg("%v", err.Error())
 	}
 	r.ns = NewNodeServer(r.cd, conf.Vtype, nodeLabels, topology, crushLocationMap)
-
+	r.ns.SetMetadata = conf.SetMetadata
 	var attr string
 	attr, err = rbd.GetKrbdSupportedFeatures()
 	if err != nil && !errors.Is(err, os.ErrNotExist) {
diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go
index 9c520883237..92a690bec17 100644
--- a/internal/rbd/nodeserver.go
+++ b/internal/rbd/nodeserver.go
@@ -55,6 +55,9 @@ type NodeServer struct {
 	ext4HasPrezeroedSupport featureFlag
 	// xfsHasReflinkSupport indicates whether the xfs filesystem has support for reflink.
 	xfsHasReflinkSupport featureFlag
+
+	// set metadata on volume
+	SetMetadata bool
 }

 // stageTransaction struct represents the state a transaction was when it either completed
@@ -398,6 +401,11 @@ func (ns *NodeServer) NodeStageVolume(
 		return &csi.NodeStageVolumeResponse{}, nil
 	}

+	// Set UserId mapping in the image metadata
+	err = ns.setUserIdMapping(ctx, cr, rv, isStaticVol)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to set userId mapping for %s: %v", rv, err)
+	}

 	// Set ClientAddress in the image metadata
 	err = ns.setClientAddress(ctx, cr, rv, isStaticVol)
@@ -432,6 +440,29 @@ func (ns *NodeServer) NodeStageVolume(
 	return &csi.NodeStageVolumeResponse{}, nil
 }

+// setUserIdMapping sets the user ID mapping in the RBD image metadata.
+// The user ID is the ceph user used for mounting the RBD image.
+// If the '--setmetadata' flag is set to false in CSI driver configuration or if the volume is static,
+// this function does nothing.
+func (ns *NodeServer) setUserIdMapping(
+	ctx context.Context, cr *util.Credentials, rv *rbdVolume, isStaticVol bool,
+) error {
+	if !ns.SetMetadata || isStaticVol {
+		return nil
+	}
+
+	nodeId := ns.Driver.GetNodeID()
+	metadataKey := getUserIDMappingKey(rv.VolID, nodeId)
+	err := rv.SetMetadata(metadataKey, cr.ID)
+	if err != nil {
+		return fmt.Errorf("failed to set user ID mapping for %s: %w", rv, err)
+	}
+
+	log.DebugLog(ctx, "user ID mapping %s set in metadata for image %s", metadataKey, rv)
+
+	return nil
+}
+
 // setClientAddress extracts the client IP address and stores it in the RBD image metadata.
 // The connection.GetAddrs() method returns an address in the format "10.244.0.1:0/2686266785".
 // We parse this to extract just the IP address portion (e.g., "10.244.0.1") and store in metadata.
diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go
index ef454fa7c76..5119000efe4 100644
--- a/internal/rbd/rbd_util.go
+++ b/internal/rbd/rbd_util.go
@@ -91,6 +91,10 @@ const (
 	// starts with `.` to avoid copying it to the mirrored image.
 	clientAddressKey = ".rbd.csi.ceph.com/clientaddress"

+	// userIdMappingKey is the key used to store the userID mapping.
+	// starts with `.` to avoid copying it to the mirrored image.
+	userIdMappingKey = ".rbd.csi.ceph.com/userid"
+
 	// Suffix added to the temp cloned image name.
 	// This will always be (rbd image name + "-temp").
 	tempImageSuffix = "-temp"
@@ -291,6 +295,11 @@ func getClientAddressKey(volumeId, nodeId string) string {
 	return fmt.Sprintf("%s/%s/%s", clientAddressKey, volumeId, nodeId)
 }

+// getUserIDMappingKey returns the key for storing user ID mapping metadata for a specific node.
+func getUserIDMappingKey(volumeID, nodeID string) string {
+	return fmt.Sprintf("%s/%s/%s", userIdMappingKey, volumeID, nodeID)
+}
+
 // prepareKrbdFeatureAttrs prepares the krbd feature set based on kernel version.
 // Minimum kernel version should be 3.8, else it will return error.
 func prepareKrbdFeatureAttrs() (uint64, error) {

From 6971282f25a5559e3863a41010fa07eb90c0e17d Mon Sep 17 00:00:00 2001
From: Praveen M
Date: Thu, 21 Aug 2025 13:15:40 +0530
Subject: [PATCH 019/157] cephfs: set/unset userIdMapping metadata

This commit does the following operations:
1. In NodeStageVolume(): sets the nodeId:userId mapping in subvolume
   metadata. The userId is the ceph user used for mounting the subvolume.
2. In ControllerUnpublishVolume(): removes the nodeId:userId mapping from
   the subvolume metadata.

Signed-off-by: Praveen M
---
 internal/cephfs/controllerserver.go | 50 ++++++++++++++++++++++++
 internal/cephfs/core/metadata.go    | 10 +++++
 internal/cephfs/driver.go           |  1 +
 internal/cephfs/nodeserver.go       | 60 +++++++++++++++++++++++++++++
 4 files changed, 121 insertions(+)

diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go
index 2936a8e4fe6..92168f7e6da 100644
--- a/internal/cephfs/controllerserver.go
+++ b/internal/cephfs/controllerserver.go
@@ -1233,6 +1233,11 @@ func (cs *ControllerServer) ControllerUnpublishVolume(
 	}
 	defer volOptions.Destroy()

+	err = cs.removeUserIdMapping(ctx, volumeId, req.GetNodeId(), secrets, volOptions)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
 	err = cs.fenceNode(ctx, volumeId, req.GetNodeId(), secrets, volOptions)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
@@ -1285,6 +1290,51 @@ func (cs *ControllerServer) getSubvolumeMetadataHandler(
 	}, nil
 }

+// removeUserIdMapping attempts to remove the nodeId:userId mapping from the subvolume metadata.
+//
+// Parameters:
+// - volumeId: The ID of the volume for which we want to remove the user ID mapping.
+// - nodeId: The ID of the node that we want to remove the user ID mapping for.
+// - volOptions: The volume options for the CephFS subvolume.
+//
+// Behavior:
+// - If the '--setmetadata' flag is set to false in CSI driver configuration, this function does nothing.
+// - Removes the user ID mapping metadata for the specified nodeId from the subvolume.
+func (cs *ControllerServer) removeUserIdMapping(
+	ctx context.Context,
+	volumeId, nodeId string,
+	secrets map[string]string,
+	volOptions *store.VolumeOptions,
+) error {
+	if !cs.SetMetadata {
+		return nil
+	}
+	if nodeId == "" {
+		return errors.New("nodeId cannot be empty")
+	}
+
+	cr, err := util.NewAdminCredentials(secrets)
+	if err != nil {
+		return fmt.Errorf("failed to create credentials from secrets: %w", err)
+	}
+	defer cr.DeleteCredentials()
+
+	metadataKey := core.GetUserIDMappingKey(volumeId, nodeId)
+	handler, err := cs.getSubvolumeMetadataHandler(ctx, volOptions, secrets)
+	if err != nil {
+		return err
+	}
+
+	err = handler.unsetAllMetadataFunc([]string{metadataKey})
+	if err != nil {
+		return fmt.Errorf("failed to remove metadata %s from %s: %w", metadataKey, handler.logSubvolumeName, err)
+	}
+
+	log.DebugLog(ctx, "userID mapping metadata %s unset for %s", metadataKey, handler.logSubvolumeName)
+
+	return nil
+}
+
+// fenceNode attempts to fence a client node from accessing the CephFS subvolume.
//
// Parameters:
diff --git a/internal/cephfs/core/metadata.go b/internal/cephfs/core/metadata.go
index 5d462476ada..717a6d02546 100644
--- a/internal/cephfs/core/metadata.go
+++ b/internal/cephfs/core/metadata.go
@@ -31,6 +31,10 @@ const (
 	// clientAddressKey is the key used to store the client address.
 	// starts with `.` to avoid copying it to the mirrored subvolume.
 	clientAddressKey = ".cephfs.csi.ceph.com/clientaddress"
+
+	// userIdMappingKey is the key used to store the userID mapping.
+	// starts with `.` to avoid copying it to the mirrored subvolume.
+	userIdMappingKey = ".cephfs.csi.ceph.com/userid"
 )

 // ErrSubVolMetadataNotSupported is returned when set/get/list/remove subvolume metadata options are not supported.
@@ -42,6 +46,12 @@ func GetClientAddressKey(volumeId, nodeId string) string {
 	return fmt.Sprintf("%s/%s/%s", clientAddressKey, volumeId, nodeId)
 }

+// GetUserIDMappingKey returns the key to store the user ID mapping in the
+// subvolume metadata.
+func GetUserIDMappingKey(volumeID, nodeID string) string {
+	return fmt.Sprintf("%s/%s/%s", userIdMappingKey, volumeID, nodeID)
+}
+
 func (s *subVolumeClient) supportsSubVolMetadata() bool {
 	newLocalClusterState(s.clusterID)

diff --git a/internal/cephfs/driver.go b/internal/cephfs/driver.go
index 808ef11da94..8a559850e69 100644
--- a/internal/cephfs/driver.go
+++ b/internal/cephfs/driver.go
@@ -167,6 +167,7 @@ func (fs *Driver) Run(conf *util.Config) {
 			conf.KernelMountOptions, conf.FuseMountOptions,
 			nodeLabels, topology, crushLocationMap,
 		)
+		fs.ns.setMetadata = true
 	}

 	if conf.IsControllerServer {
diff --git a/internal/cephfs/nodeserver.go b/internal/cephfs/nodeserver.go
index c7d1ff4acbb..43f7b7296b1 100644
--- a/internal/cephfs/nodeserver.go
+++ b/internal/cephfs/nodeserver.go
@@ -53,6 +53,9 @@ type NodeServer struct {
 	kernelMountOptions string
 	fuseMountOptions   string
 	healthChecker      hc.Manager
+
+	// set metadata on volume
+	setMetadata bool
 }

 func getCredentialsForVolume(
@@ -274,6 +277,11 @@ func (ns *NodeServer) NodeStageVolume(
 		return nil, status.Error(codes.Internal, err.Error())
 	}

+	err = ns.setUserIdMapping(ctx, req.GetSecrets(), req.GetVolumeId(), volOptions)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
 	err = ns.setClientAddress(ctx, req.GetVolumeId(), req.GetSecrets(), volOptions)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
@@ -333,6 +341,58 @@ func (ns *NodeServer) NodeStageVolume(
 	return &csi.NodeStageVolumeResponse{}, nil
 }

+// setUserIdMapping sets the user ID mapping in the CephFS subvolume metadata.
+// The user ID is the ceph user used for mounting the subvolume.
+// If the volume is snapshot-backed, the user ID mapping is set on the backing snapshot metadata.
+// If the '--setmetadata' flag is set to false in CSI driver configuration, or if the volume is static, it does nothing.
+func (ns *NodeServer) setUserIdMapping( + ctx context.Context, secrets map[string]string, + volumeId string, volOptions *store.VolumeOptions, +) error { + if !ns.setMetadata || !volOptions.ProvisionVolume { + return nil + } + + conn := volOptions.GetConnection() + subvolumeClient := core.NewSubVolume(conn, &volOptions.SubVolume, volOptions.ClusterID, "", true) + metadataKey := core.GetUserIDMappingKey(volumeId, ns.Driver.GetNodeID()) + params := map[string]string{metadataKey: conn.Creds.ID} + + if volOptions.BackingSnapshot { + cr, err := util.NewAdminCredentials(secrets) + if err != nil { + return fmt.Errorf("failed to create credentials from secrets: %w", err) + } + defer cr.DeleteCredentials() + + volOpt, _, sid, err := store.NewSnapshotOptionsFromID(ctx, volOptions.BackingSnapshotID, cr, secrets, "", true) + if err != nil { + return err + } + defer volOpt.Destroy() + snapClient := core.NewSnapshot(conn, sid.FsSnapshotName, volOpt.ClusterID, "", true, &volOpt.SubVolume) + if err = snapClient.SetAllSnapshotMetadata(params); err != nil { + return fmt.Errorf("failed to set user ID mapping metadata %s for subvolume snapshot %s: %w", + metadataKey, sid.FsSnapshotName, err) + } + + log.DebugLog(ctx, "userID mapping metadata %s set for subvolume snapshot %s", + metadataKey, sid.FsSnapshotName) + + return nil + } + + err := subvolumeClient.SetAllMetadata(params) + if err != nil { + return fmt.Errorf("failed to set user ID mapping %s for subvolume %s: %w", + metadataKey, volOptions.SubVolume.VolID, err) + } + + log.DebugLog(ctx, "userID mapping metadata %s set for subvolume %s", metadataKey, volOptions.SubVolume.VolID) + + return nil +} + // setClientAddress extracts the client IP address and stores it in the subvolume metadata. // The connection.GetAddrs() method returns an address in the format "10.244.0.1:0/2686266785". // We parse this to extract just the IP address portion (e.g., "10.244.0.1") and store in metadata. 
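For reference, getUserIDMappingKey (RBD) and core.GetUserIDMappingKey (CephFS) in the two patches above build keys with the same `<prefix>/<volumeID>/<nodeID>` shape. A minimal sketch of the resulting layout; the prefixes are taken from the diffs, while the volume handle and node name below are made-up example values:

```go
package main

import "fmt"

// getUserIDMappingKey mirrors the key construction added above; only the
// prefixes are real, the IDs passed to it here are hypothetical.
func getUserIDMappingKey(prefix, volumeID, nodeID string) string {
	return fmt.Sprintf("%s/%s/%s", prefix, volumeID, nodeID)
}

func main() {
	const (
		rbdPrefix    = ".rbd.csi.ceph.com/userid"
		cephfsPrefix = ".cephfs.csi.ceph.com/userid"
	)

	volumeID := "0001-0009-rook-ceph-0000000000000001-9d8c3a" // hypothetical volume handle
	nodeID := "worker-1"                                      // hypothetical node ID

	// keys start with `.` so they are not copied to mirrored images/subvolumes
	fmt.Println(getUserIDMappingKey(rbdPrefix, volumeID, nodeID))
	fmt.Println(getUserIDMappingKey(cephfsPrefix, volumeID, nodeID))
}
```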
From 05f34728ac9c346d7457f9205aa4dd6f19e4c428 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 21 Aug 2025 14:08:41 +0530 Subject: [PATCH 020/157] e2e: add tests to verify userid mapping metadata Signed-off-by: Praveen M --- e2e/cephfs.go | 26 ++++++ e2e/cephfs_helper.go | 29 +++++++ e2e/pod.go | 193 +++++++++++++++++++++++++++++-------------- e2e/rbd.go | 11 +++ 4 files changed, 199 insertions(+), 60 deletions(-) diff --git a/e2e/cephfs.go b/e2e/cephfs.go index c06c8f3369e..3119c4c360c 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -363,6 +363,26 @@ var _ = Describe(cephfsType, func() { }) } + By("verify userId mapping metadata exists", func() { + err := createCephfsStorageClass(f.ClientSet, f, true, nil) + if err != nil { + framework.Failf("failed to create CephFS storageclass: %v", err) + } + + err = verifyUserIdMappingMetadata(f, pvcPath, appPath, cephfsType) + if err != nil { + framework.Failf("failed to verify userId mapping metadata exists: %v", err) + } + + validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) + validateOmapCount(f, 0, cephfsType, metadataPool, volumesType) + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + }) + By("verify client address metadata exists", func() { err := createCephfsStorageClass(f.ClientSet, f, true, nil) if err != nil { @@ -1977,6 +1997,12 @@ var _ = Describe(cephfsType, func() { if err != nil { logAndFail("failed to verify client address metadata of snapshot-backed PVC: %v", err) } + err = verifyUserIdMappingMetadataSnapshotBacked( + f, pvcClone, appClone, backingSubvolumeNameData.imageName, backingSnapshotName, + ) + if err != nil { + logAndFail("failed to verify user ID mapping metadata of snapshot-backed PVC: %v", err) + } // Snapshot-backed volume shouldn't contribute to total subvolume count. validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup) diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go index 17eac7365f5..496b6f35a23 100644 --- a/e2e/cephfs_helper.go +++ b/e2e/cephfs_helper.go @@ -690,3 +690,32 @@ func verifyClientAddressMetadataSnapshotBacked( return nil } + +func verifyUserIdMappingMetadataSnapshotBacked( + f *framework.Framework, + pvc *v1.PersistentVolumeClaim, + pod *v1.Pod, + subVolumeName, snapshotName string, +) error { + nodeId := pod.Spec.NodeName + _, pvObject, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + if err != nil { + return fmt.Errorf("failed to get PVC and PV: %w", err) + } + volumeHandle := pvObject.Spec.CSI.VolumeHandle + + expectedValue := keyringCephFSNodePluginUsername + metadataKey := fmt.Sprintf(".cephfs.csi.ceph.com/userid/%s/%s", volumeHandle, nodeId) + metadataValue, err := getCephFSSnapshotMetadata( + f, fileSystemName, subVolumeName, snapshotName, subvolumegroup, metadataKey) + if err != nil { + return fmt.Errorf("failed to get subvolume snapshot metadata %s: %w", metadataKey, err) + } + + if metadataValue != expectedValue { + return fmt.Errorf("userId mapping metadata %s has unexpected value %s, expected %s", + metadataKey, metadataValue, expectedValue) + } + + return nil +} diff --git a/e2e/pod.go b/e2e/pod.go index a08f4e0b64a..53fa19e74a2 100644 --- a/e2e/pod.go +++ b/e2e/pod.go @@ -688,7 +688,7 @@ func verifyReadAffinity( configInfos, stdErr, err = execCommandInContainer(f, command, ns, cn, &opt) if strings.TrimSpace(configInfos) != "" { framework.Logf("configInfos: %v, err=%v", configInfos, err) - //override error if the configInfo is found. 
+ // override error if the configInfo is found. err = nil break } @@ -748,119 +748,192 @@ func verifyReadAffinity( return nil } -func verifyClientAddressMetadataExists( +func verifyMetadataRemoved( f *framework.Framework, - pvcPath, appPath string, - driverType string, + metadataKey, driverType, rbdImageSpec, subvolumeName string, ) error { - // create PVC + // verify metadata is removed after pod deletion + // retry up to 3 times with 10-second intervals + var ( + metadataValue string + err error + ) + maxRetries := 3 + retryInterval := 10 * time.Second + + for attempt := 1; attempt <= maxRetries; attempt++ { + switch driverType { + case rbdType: + metadataValue, err = getImageMeta(rbdImageSpec, metadataKey, f) + case cephfsType: + metadataValue, err = getCephFSSubvolumeMetadata( + f, fileSystemName, subvolumeName, subvolumegroup, metadataKey) + } + + if err != nil && strings.Contains(err.Error(), "command terminated with exit code 2") { + framework.Logf("metadata %s successfully removed", metadataKey) + break + } + + framework.Logf("metadata %s still exists with value %s, retrying (%d/%d)", + metadataKey, metadataValue, attempt, maxRetries) + + if attempt == maxRetries { + return fmt.Errorf("metadata %s still exists with value %s (after %d attempts)", + metadataKey, metadataValue, maxRetries) + } + time.Sleep(retryInterval) + } + + return nil +} + +func getAppAndPVC( + f *framework.Framework, + pvcPath, appPath string, +) (*v1.Pod, *v1.PersistentVolumeClaim, error) { pvc, err := loadPVC(pvcPath) if err != nil { - return fmt.Errorf("failed to load pvc: %w", err) + return nil, nil, fmt.Errorf("failed to load pvc: %w", err) } pvc.Namespace = f.UniqueName - err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) - if err != nil { - return fmt.Errorf("failed to create PVC: %w", err) + if err := createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout); err != nil { + return nil, nil, fmt.Errorf("failed to create PVC: %w", err) } app, err := loadApp(appPath) if err != nil { - return fmt.Errorf("failed to load application: %w", err) + return nil, nil, fmt.Errorf("failed to load application: %w", err) } app.Namespace = f.UniqueName - err = createApp(f.ClientSet, app, deployTimeout) - if err != nil { - return fmt.Errorf("failed to create application: %w", err) + if err := createApp(f.ClientSet, app, deployTimeout); err != nil { + return nil, nil, fmt.Errorf("failed to create application: %w", err) } pod, err := getPod(f.ClientSet, app.Namespace, app.Name) if err != nil { - return fmt.Errorf("failed to get pod: %w", err) + return nil, nil, fmt.Errorf("failed to get pod: %w", err) } - nodeId := pod.Spec.NodeName + pvc, _, err = getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + if err != nil { + return nil, nil, fmt.Errorf("failed to get PVC and PV: %w", err) + } + + return pod, pvc, nil +} + +func verifyClientAddressMetadataExists( + f *framework.Framework, + pvcPath, appPath string, + driverType string, +) error { var ( metadataKey string metadataValue string rbdImageSpec string - subVolumeName string + subvolumeName string + err error ) - _, pvObject, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + pod, pvc, err := getAppAndPVC(f, pvcPath, appPath) + if err != nil { + return err + } + _, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) if err != nil { - framework.Logf("error getting pvc %q in namespace %q: %v", pvc.Name, pvc.Namespace, err) return fmt.Errorf("failed to get PVC and PV: %w", err) } - volumeHandle := pvObject.Spec.CSI.VolumeHandle - // First, get the 
metadata while the pod is running to verify it exists + nodeId := pod.Spec.NodeName + volumeHandle := pv.Spec.CSI.VolumeHandle switch driverType { case rbdType: metadataKey = fmt.Sprintf(".rbd.csi.ceph.com/clientaddress/%s/%s", volumeHandle, nodeId) - imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) if err != nil { return err } - rbdImageSpec = imageSpec(defaultRBDPool, imageData.imageName) metadataValue, err = getImageMeta(rbdImageSpec, metadataKey, f) - if err != nil { - return fmt.Errorf("failed to get image metadata %s: %w", metadataKey, err) - } case cephfsType: metadataKey = fmt.Sprintf(".cephfs.csi.ceph.com/clientaddress/%s/%s", volumeHandle, nodeId) - subVolumeName = pvObject.Spec.CSI.VolumeAttributes["subvolumeName"] - metadataValue, err = getCephFSSubvolumeMetadata( - f, fileSystemName, subVolumeName, subvolumegroup, metadataKey) - if err != nil { - return fmt.Errorf("failed to get subvolume metadata %s: %w", metadataKey, err) - } + subvolumeName = pv.Spec.CSI.VolumeAttributes["subvolumeName"] + metadataValue, err = getCephFSSubvolumeMetadata(f, fileSystemName, subvolumeName, subvolumegroup, metadataKey) + } + if err != nil { + return fmt.Errorf("failed to get metadata %s: %w", metadataKey, err) } - if metadataValue == "" { return fmt.Errorf("client address metadata %s value is empty", metadataKey) } - err = deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout) - if err != nil { + if err := deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout); err != nil { return fmt.Errorf("failed to delete pod: %w", err) } - // verify clientaddress metadata is removed after pod deletion - // retry up to 3 times with 10-second intervals - maxRetries := 3 - retryInterval := 10 * time.Second + if err := verifyMetadataRemoved(f, metadataKey, driverType, rbdImageSpec, subvolumeName); err != nil { + return fmt.Errorf("failed to verify metadata removal: %w", err) + } - for attempt := 1; attempt <= maxRetries; attempt++ { - switch driverType { - case rbdType: - metadataValue, err = getImageMeta(rbdImageSpec, metadataKey, f) - case cephfsType: - metadataValue, err = getCephFSSubvolumeMetadata( - f, fileSystemName, subVolumeName, subvolumegroup, metadataKey) - } + return deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) +} - if err != nil && strings.Contains(err.Error(), "command terminated with exit code 2") { - framework.Logf("clientaddress metadata %s successfully removed", metadataKey) - break - } +func verifyUserIdMappingMetadata( + f *framework.Framework, + pvcPath, appPath string, + driverType string, +) error { + var ( + metadataKey string + metadataValue string + expectedValue string + rbdImageSpec string + subvolumeName string + err error + ) - framework.Logf("clientaddress metadata %s still exists with value %s, retrying (%d/%d)", - metadataKey, metadataValue, attempt, maxRetries) + pod, pvc, err := getAppAndPVC(f, pvcPath, appPath) + if err != nil { + return err + } + _, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + if err != nil { + return fmt.Errorf("failed to get PVC and PV: %w", err) + } - if attempt == maxRetries { - return fmt.Errorf("clientaddress metadata %s still exists with value %s (after %d attempts)", - metadataKey, metadataValue, maxRetries) + nodeId := pod.Spec.NodeName + volumeHandle := pv.Spec.CSI.VolumeHandle + switch driverType { + case rbdType: + expectedValue = keyringRBDNodePluginUsername + metadataKey = fmt.Sprintf(".rbd.csi.ceph.com/userid/%s/%s", volumeHandle, nodeId) + imageData, err := 
getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err } - time.Sleep(retryInterval) + rbdImageSpec = imageSpec(defaultRBDPool, imageData.imageName) + metadataValue, err = getImageMeta(rbdImageSpec, metadataKey, f) + case cephfsType: + expectedValue = keyringCephFSNodePluginUsername + metadataKey = fmt.Sprintf(".cephfs.csi.ceph.com/userid/%s/%s", volumeHandle, nodeId) + subvolumeName = pv.Spec.CSI.VolumeAttributes["subvolumeName"] + metadataValue, err = getCephFSSubvolumeMetadata(f, fileSystemName, subvolumeName, subvolumegroup, metadataKey) } - - err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - return fmt.Errorf("failed to delete PVC: %w", err) + return fmt.Errorf("failed to get metadata %s: %w", metadataKey, err) + } + if metadataValue != expectedValue { + return fmt.Errorf("userId mapping metadata %s has unexpected value %s, expected %s", metadataKey, metadataValue, expectedValue) } - return nil + if err := deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout); err != nil { + return fmt.Errorf("failed to delete pod: %w", err) + } + + if err := verifyMetadataRemoved(f, metadataKey, driverType, rbdImageSpec, subvolumeName); err != nil { + return fmt.Errorf("failed to verify metadata removal: %w", err) + } + + return deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) } diff --git a/e2e/rbd.go b/e2e/rbd.go index 34243edd106..f2c7576121c 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -532,6 +532,17 @@ var _ = Describe("RBD", func() { }) } + By("verify userId mapping metadata exists", func() { + err := verifyUserIdMappingMetadata(f, pvcPath, appPath, rbdType) + if err != nil { + framework.Failf("failed to verify userId mapping metadata exists: %v", err) + } + + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType) + }) + By("verify client address metadata exists", func() { err := verifyClientAddressMetadataExists(f, pvcPath, appPath, rbdType) if err != nil { From df84cabc0b0330bd33c2478d45076962b00e9bad Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 21 Aug 2025 14:09:01 +0530 Subject: [PATCH 021/157] doc: update PendingReleaseNotes.md Signed-off-by: Praveen M --- PendingReleaseNotes.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md index dd146e3ffaa..f7106ea9837 100644 --- a/PendingReleaseNotes.md +++ b/PendingReleaseNotes.md @@ -4,4 +4,7 @@ ## Features +- set nodeId:userId mapping in metadata [PR](https://github.com/ceph/ceph-csi/pull/5445) + - refer design doc for more details - [here](https://github.com/ceph/ceph-csi/blob/devel/docs/design/proposals/userID-mapping.md) + ## NOTE From 15a2bd26009982127cf39a4fc4dc2dfafe705b4c Mon Sep 17 00:00:00 2001 From: gadi-didi Date: Tue, 26 Aug 2025 12:45:04 +0300 Subject: [PATCH 022/157] nvmeof: change the order create listener before create ns change the order: create listener to be located after create subsystem and before create ns. 
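The resulting order can be sketched against the gateway client used by this
driver. A sketch only: error handling is trimmed, the wrapper functions are
illustrative, and gw is assumed to be a connected *nvmeof.GatewayRpcClient:

```go
package sketch

import (
	"context"

	"github.com/ceph/ceph-csi/internal/nvmeof"
)

// create shows the provisioning order after this change; cleanup mirrors it
// in reverse. These wrappers are illustrative, not driver code.
func create(ctx context.Context, gw *nvmeof.GatewayRpcClient,
	nqn, pool, image string, l nvmeof.ListenerDetails,
) (uint32, error) {
	if err := gw.CreateSubsystem(ctx, nqn); err != nil { // 1. subsystem
		return 0, err
	}
	if err := gw.CreateListener(ctx, nqn, l); err != nil { // 2. listener
		return 0, err
	}

	return gw.CreateNamespace(ctx, nqn, pool, image) // 3. namespace
}

func cleanup(ctx context.Context, gw *nvmeof.GatewayRpcClient,
	nqn string, nsid uint32, l nvmeof.ListenerDetails,
) error {
	if err := gw.DeleteNamespace(ctx, nqn, nsid); err != nil { // namespace first
		return err
	}

	// the listener goes next; the (now empty) subsystem is removed last,
	// by cleanupEmptySubsystem in the controller.
	return gw.DeleteListener(ctx, nqn, l)
}
```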
Signed-off-by: gadi-didi
---
 .../nvmeof/controller/controllerserver.go | 34 +++++++++----------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
index 98588f21664..3a4db8604e2 100644
--- a/internal/nvmeof/controller/controllerserver.go
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -288,15 +288,7 @@ func createNVMeoFResources(
 		return nil, fmt.Errorf("subsystem setup failed: %w", err)
 	}

-	// Step 4: Create namespace
-	nsid, err := gateway.CreateNamespace(ctx, nvmeofData.SubsystemNQN, rbdPoolName, rbdImageName)
-	if err != nil {
-		return nil, fmt.Errorf("namespace creation failed: %w", err)
-	}
-	log.DebugLog(ctx, "Namespace created: %s/%s with NSID: %d", rbdPoolName, rbdImageName, nsid)
-	nvmeofData.NamespaceID = nsid
-
-	// Step 5: Create listeners
+	// Step 4: Create listeners
 	err = gateway.CreateListener(ctx, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)
 	if err != nil {
 		return nil, fmt.Errorf("listener creation failed: %w", err)
@@ -304,6 +296,14 @@ func createNVMeoFResources(
 	log.DebugLog(ctx, "Listener created for subsystem %s at %s",
 		nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)

+	// Step 5: Create namespace and set its UUID
+	nsid, err := gateway.CreateNamespace(ctx, nvmeofData.SubsystemNQN, rbdPoolName, rbdImageName)
+	if err != nil {
+		return nil, fmt.Errorf("namespace creation failed: %w", err)
+	}
+	log.DebugLog(ctx, "Namespace created: %s/%s with NSID: %d", rbdPoolName, rbdImageName, nsid)
+	nvmeofData.NamespaceID = nsid
+
 	// Step 6: Add host to subsystem
 	if err := gateway.AddHost(ctx, nvmeofData.SubsystemNQN, nvmeofData.HostNQN); err != nil {
 		return nil, fmt.Errorf("host addition failed: %w", err)
@@ -345,7 +345,14 @@ func cleanupNVMeoFResources(
 	}
 	log.DebugLog(ctx, "Host %s removed from subsystem %s", nvmeofData.HostNQN, nvmeofData.SubsystemNQN)

-	// Step 3: Delete listener
+	// Step 3: Delete namespace
+	if err := gateway.DeleteNamespace(ctx, nvmeofData.SubsystemNQN, nvmeofData.NamespaceID); err != nil {
+		return fmt.Errorf("failed to delete namespace %d for subsystem %s: %w",
+			nvmeofData.NamespaceID, nvmeofData.SubsystemNQN, err)
+	}
+	log.DebugLog(ctx, "Namespace %d deleted for subsystem %s", nvmeofData.NamespaceID, nvmeofData.SubsystemNQN)
+
+	// Step 4: Delete listener
 	err = gateway.DeleteListener(ctx, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)
 	if err != nil {
 		return fmt.Errorf("failed to delete listener for subsystem %s: %w", nvmeofData.SubsystemNQN, err)
@@ -353,13 +360,6 @@ func cleanupNVMeoFResources(
 	log.DebugLog(ctx, "Listener deleted for subsystem %s at %s",
 		nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)

-	// Step 4: Delete namespace
-	if err := gateway.DeleteNamespace(ctx, nvmeofData.SubsystemNQN, nvmeofData.NamespaceID); err != nil {
-		return fmt.Errorf("failed to delete namespace %d for subsystem %s: %w",
-			nvmeofData.NamespaceID, nvmeofData.SubsystemNQN, err)
-	}
-	log.DebugLog(ctx, "Namespace %d deleted for subsystem %s", nvmeofData.NamespaceID, nvmeofData.SubsystemNQN)
-
 	// Step 5: Cleanup empty subsystem
 	if err := cleanupEmptySubsystem(ctx, gateway, nvmeofData.SubsystemNQN); err != nil {
 		return fmt.Errorf("failed to cleanup empty subsystem %s: %w", nvmeofData.SubsystemNQN, err)

From 7e1516bea88fa486f0029e34e0f80cf902d23327 Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Tue, 26 Aug 2025 12:40:42 +0300
Subject: [PATCH 023/157] nvmeof: add ns-uuid property into NVMeoFVolumeData

Added because the node plugin uses it.
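Usage of the new lookup right after namespace creation, as a sketch; the
createNamespaceWithUUID wrapper is illustrative, while the gateway methods
and their signatures are the ones added in this patch:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/ceph/ceph-csi/internal/nvmeof"
)

// createNamespaceWithUUID creates a namespace and then resolves its UUID.
// The NamespaceAdd response only carries the NSID, so the UUID is looked up
// via a ListNamespaces query (what GetUUIDBySubsystemAndNameSpaceID does).
func createNamespaceWithUUID(
	ctx context.Context,
	gw *nvmeof.GatewayRpcClient,
	nqn, pool, image string,
) (uint32, string, error) {
	nsid, err := gw.CreateNamespace(ctx, nqn, pool, image)
	if err != nil {
		return 0, "", fmt.Errorf("namespace creation failed: %w", err)
	}

	uuid, err := gw.GetUUIDBySubsystemAndNameSpaceID(ctx, nqn, nsid)
	if err != nil {
		return 0, "", fmt.Errorf("get namespace uuid failed: %w", err)
	}

	return nsid, uuid, nil
}
```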
Signed-off-by: gadi-didi --- .../nvmeof/controller/controllerserver.go | 36 ++++++++++++------- internal/nvmeof/nvmeof.go | 34 ++++++++++++++++++ 2 files changed, 58 insertions(+), 12 deletions(-) diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go index 3a4db8604e2..bde6e7d0097 100644 --- a/internal/nvmeof/controller/controllerserver.go +++ b/internal/nvmeof/controller/controllerserver.go @@ -252,9 +252,10 @@ func createNVMeoFResources( return nil, fmt.Errorf("invalid nvmeofGatewayPort %s: %w", nvmeofGatewayPortStr, err) } nvmeofData := &nvmeof.NVMeoFVolumeData{ - SubsystemNQN: params["subsystemNQN"], - NamespaceID: 0, // will be set after namespace creation - HostNQN: params["hostNQN"], + SubsystemNQN: params["subsystemNQN"], + NamespaceID: 0, // will be set after namespace creation, + NamespaceUUID: "", // will be set after namespace creation + HostNQN: params["hostNQN"], ListenerInfo: nvmeof.ListenerDetails{ GatewayAddress: nvmeof.GatewayAddress{ Address: params["listenerIpAddress"], @@ -304,6 +305,12 @@ func createNVMeoFResources( log.DebugLog(ctx, "Namespace created: %s/%s with NSID: %d", rbdPoolName, rbdImageName, nsid) nvmeofData.NamespaceID = nsid + uuid, err := gateway.GetUUIDBySubsystemAndNameSpaceID(ctx, nvmeofData.SubsystemNQN, nvmeofData.NamespaceID) + if err != nil { + return nil, fmt.Errorf("get namespace uuid failed: %w", err) + } + nvmeofData.NamespaceUUID = uuid + // Step 6: Add host to subsystem if err := gateway.AddHost(ctx, nvmeofData.SubsystemNQN, nvmeofData.HostNQN); err != nil { return nil, fmt.Errorf("host addition failed: %w", err) @@ -371,9 +378,10 @@ func cleanupNVMeoFResources( const ( // NVMe-oF resource info. - mdSubsystemNQN = ".rbd.nvmeof.SubsystemNQN" - mdNamespaceID = ".rbd.nvmeof.NamespaceID" - mdHostNQN = ".rbd.nvmeof.HostNQN" + mdSubsystemNQN = ".rbd.nvmeof.SubsystemNQN" + mdNamespaceID = ".rbd.nvmeof.NamespaceID" + mdNamespaceUUID = ".rbd.nvmeof.NamespaceUUID" + mdHostNQN = ".rbd.nvmeof.HostNQN" // Listener info. 
mdListenerAddress = ".rbd.nvmeof.ListenerAddress" @@ -395,6 +403,7 @@ func populateVolumeContext(volume *csi.Volume, data *nvmeof.NVMeoFVolumeData) { volume.VolumeContext[mdSubsystemNQN] = data.SubsystemNQN volume.VolumeContext[mdNamespaceID] = strconv.FormatUint(uint64(data.NamespaceID), 10) + volume.VolumeContext[mdNamespaceUUID] = data.NamespaceUUID volume.VolumeContext[mdHostNQN] = data.HostNQN volume.VolumeContext[mdListenerAddress] = data.ListenerInfo.Address volume.VolumeContext[mdListenerPort] = listenerPortStr @@ -427,9 +436,10 @@ func (cs *Server) storeNVMeoFMetadata( // Prepare all metadata entries metadata := map[string]string{ // NVMe-oF resource info - mdSubsystemNQN: nvmeofData.SubsystemNQN, - mdNamespaceID: strconv.FormatUint(uint64(nvmeofData.NamespaceID), 10), - mdHostNQN: nvmeofData.HostNQN, + mdSubsystemNQN: nvmeofData.SubsystemNQN, + mdNamespaceID: strconv.FormatUint(uint64(nvmeofData.NamespaceID), 10), + mdNamespaceUUID: nvmeofData.NamespaceUUID, + mdHostNQN: nvmeofData.HostNQN, // Listener info mdListenerAddress: nvmeofData.ListenerInfo.Address, @@ -485,6 +495,7 @@ func (cs *Server) getNVMeoFMetadata( requiredKeys := []string{ mdSubsystemNQN, mdNamespaceID, + mdNamespaceUUID, mdHostNQN, mdListenerAddress, mdListenerPort, @@ -521,9 +532,10 @@ func (cs *Server) getNVMeoFMetadata( } // Construct NVMe-oF volume data nvmeofData := &nvmeof.NVMeoFVolumeData{ - SubsystemNQN: metadata[mdSubsystemNQN], - NamespaceID: uint32(nsid), - HostNQN: metadata[mdHostNQN], + SubsystemNQN: metadata[mdSubsystemNQN], + NamespaceID: uint32(nsid), + NamespaceUUID: metadata[mdNamespaceUUID], + HostNQN: metadata[mdHostNQN], ListenerInfo: nvmeof.ListenerDetails{ GatewayAddress: nvmeof.GatewayAddress{ Address: metadata[mdListenerAddress], diff --git a/internal/nvmeof/nvmeof.go b/internal/nvmeof/nvmeof.go index e717c605484..60a830bca52 100644 --- a/internal/nvmeof/nvmeof.go +++ b/internal/nvmeof/nvmeof.go @@ -56,6 +56,7 @@ type GatewayConfig = GatewayAddress type NVMeoFVolumeData struct { SubsystemNQN string NamespaceID uint32 + NamespaceUUID string HostNQN string ListenerInfo ListenerDetails GatewayManagementInfo GatewayConfig @@ -170,6 +171,39 @@ func (gw *GatewayRpcClient) DeleteNamespace(ctx context.Context, subsystemNQN st status.GetStatus(), status.GetErrorMessage()) } +// GetUUIDBySubsystemAndNameSpaceID get the uuid of namespace by given subsystem and ns-id. +func (gw *GatewayRpcClient) GetUUIDBySubsystemAndNameSpaceID( + ctx context.Context, + subsystemNQN string, + namespaceID uint32, +) (string, error) { + req := &pb.ListNamespacesReq{ + Subsystem: subsystemNQN, + Nsid: &namespaceID, + } + resp, err := gw.client.ListNamespaces(ctx, req) + if err != nil { + return "", err + } + // Check if response is valid + if resp == nil { + return "", fmt.Errorf("received nil response from subsystem %s nsid %d", subsystemNQN, namespaceID) + } + + // Check if any namespaces found + if len(resp.GetNamespaces()) == 0 { + return "", fmt.Errorf("no namespaces found for subsystem %s nsid %d", subsystemNQN, namespaceID) + } + // extra validation + for _, ns := range resp.GetNamespaces() { + if ns.GetNsid() == namespaceID { + return ns.GetUuid(), nil + } + } + + return "", fmt.Errorf("namespace with nsid %d not found", namespaceID) +} + // CreateSubsystem creates an NVMe-oF subsystem on the gateway. 
func (gw *GatewayRpcClient) CreateSubsystem(ctx context.Context, subsystemNQN string) error {
	log.DebugLog(ctx, "Creating NVMe subsystem: %s on gateway %s",

From c1192d2485f3d4e4ab627677c79262ca83ac58cf Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Sun, 17 Aug 2025 08:59:48 +0300
Subject: [PATCH 024/157] nvmeof: make AddHost, CreateListener, CreateSubsystem
 and AddNamespace idempotent

Don't return an error, because Kubernetes can retrigger CreateVolume with
the same request.

Signed-off-by: gadi-didi
---
 internal/nvmeof/nvmeof.go | 69 ++++++++++++++++++++++++++++++++-------
 1 file changed, 58 insertions(+), 11 deletions(-)

diff --git a/internal/nvmeof/nvmeof.go b/internal/nvmeof/nvmeof.go
index 60a830bca52..10acd039961 100644
--- a/internal/nvmeof/nvmeof.go
+++ b/internal/nvmeof/nvmeof.go
@@ -132,10 +132,12 @@ func (gw *GatewayRpcClient) CreateNamespace(ctx context.Context, subsystemNQN, p
 	}

 	resp, err := gw.client.NamespaceAdd(ctx, req)
-	if err != nil {
+	switch {
+	case err != nil:
 		return 0, fmt.Errorf("failed to create namespace for %s/%s: %w", poolName, imageName, err)
-	}
-	if resp.GetStatus() != 0 {
+	case resp.GetStatus() == int32(syscall.EEXIST):
+		return gw.findExistingNamespace(ctx, subsystemNQN, poolName, imageName)
+	case resp.GetStatus() != 0:
 		return 0, fmt.Errorf("gateway NamespaceAdd returned error: %s", resp.GetErrorMessage())
 	}
 	log.DebugLog(ctx, "Namespace created with NSID: %d", resp.GetNsid())
@@ -225,10 +227,13 @@ func (gw *GatewayRpcClient) CreateSubsystem(ctx context.Context, subsystemNQN st
 	}

 	status, err := gw.client.CreateSubsystem(ctx, req)
-	if err != nil {
+	switch {
+	case err != nil:
 		return fmt.Errorf("failed to create subsystem %s: %w", subsystemNQN, err)
-	}
-	if status.GetStatus() != 0 {
+	case status.GetStatus() == int32(syscall.EEXIST):
+		// subsystem was already created
+		return nil
+	case status.GetStatus() != 0:
 		return fmt.Errorf("gateway CreateSubsystem returned error: %s", status.GetErrorMessage())
 	}

@@ -306,12 +311,18 @@ func (gw *GatewayRpcClient) AddHost(ctx context.Context, subsystemNQN, hostNQN s
 	if err != nil {
 		return fmt.Errorf("failed to add host %s to subsystem %s: %w", hostNQN, subsystemNQN, err)
 	}
-	if resp.GetStatus() != 0 {
-		return fmt.Errorf("gateway AddHost returned error (status=%d): %s", resp.GetStatus(), resp.GetErrorMessage())
+	if resp.GetStatus() == 0 {
+		log.DebugLog(ctx, "Host added successfully: %s to subsystem %s", hostNQN, subsystemNQN)
+
+		return nil
 	}
-	log.DebugLog(ctx, "Host added successfully: %s to subsystem %s", hostNQN, subsystemNQN)
+	if resp.GetStatus() == int32(syscall.EEXIST) { // EEXIST
+		log.DebugLog(ctx, "Host %s already added to subsystem %s", hostNQN, subsystemNQN)

-	return nil
+		return nil // Host already added, no error
+	}
+
+	return fmt.Errorf("gateway AddHost returned error (status=%d): %s", resp.GetStatus(), resp.GetErrorMessage())
 }

 func (gw *GatewayRpcClient) CreateListener(ctx context.Context, subsystemNQN string, listenerInfo ListenerDetails,
@@ -332,10 +343,14 @@ func (gw *GatewayRpcClient) CreateListener(ctx context.Context, subsystemNQN str
 	if err != nil {
 		return fmt.Errorf("failed to add listener %s to subsystem %s: %w", listenerInfo.Address, subsystemNQN, err)
 	}
+	if resp.GetStatus() == int32(syscall.EEXIST) { // EEXIST
+		log.DebugLog(ctx, "Listener %s already created for subsystem %s", listenerInfo.Address, subsystemNQN)
+
+		return nil // Listener already created, no error
+	}
 	if resp.GetStatus() != 0 {
 		return fmt.Errorf("gateway AddListener returned error (status=%d): %s", resp.GetStatus(),
resp.GetErrorMessage()) } - log.DebugLog(ctx, "Listener added successfully: %s to subsystem %s", listenerInfo.Address, subsystemNQN) return nil @@ -450,3 +465,35 @@ func (c *GatewayRpcClient) generateSerialNumber() (string, error) { return fmt.Sprintf("Ceph%d", serial), nil } + +// findExistingNamespace searches for a namespace matching the given pool and image names +// Returns the NSID if found, or an error if not found or on failure. +func (gw *GatewayRpcClient) findExistingNamespace( + ctx context.Context, + subsystemNQN, + poolName, + imageName string, +) (uint32, error) { + r, err := gw.client.ListNamespaces(ctx, &pb.ListNamespacesReq{Subsystem: subsystemNQN}) + if err != nil { + return 0, fmt.Errorf("failed to get namespaces in subsystem %s: %w", subsystemNQN, err) + } + + if r.GetStatus() != 0 { + return 0, fmt.Errorf("could not get namespaces in subsystem %s (status=%d): %s", + subsystemNQN, r.GetStatus(), r.GetErrorMessage()) + } + + if len(r.GetNamespaces()) == 0 { + return 0, fmt.Errorf("no existing namespaces in subsystem %s", subsystemNQN) + } + + for _, ns := range r.GetNamespaces() { + if ns.GetRbdPoolName() == poolName && ns.GetRbdImageName() == imageName { + return ns.GetNsid(), nil + } + } + + return 0, fmt.Errorf("could not find existing namespace for %s/%s in subsystem %s", + poolName, imageName, subsystemNQN) +} From 764648cbc5cc08851cfab7a84cd3e121d917bea8 Mon Sep 17 00:00:00 2001 From: gadi-didi Date: Wed, 20 Aug 2025 12:15:48 +0300 Subject: [PATCH 025/157] nvmeof: remove common prefix "rbd.nvmeof." in volumeContext remove "rbd.nvmeof." prefix Signed-off-by: gadi-didi --- .../nvmeof/controller/controllerserver.go | 96 ++++++++++--------- 1 file changed, 51 insertions(+), 45 deletions(-) diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go index bde6e7d0097..2f3c831bf7f 100644 --- a/internal/nvmeof/controller/controllerserver.go +++ b/internal/nvmeof/controller/controllerserver.go @@ -376,23 +376,29 @@ func cleanupNVMeoFResources( return nil } +// VolumeContext metadata keys. const ( // NVMe-oF resource info. - mdSubsystemNQN = ".rbd.nvmeof.SubsystemNQN" - mdNamespaceID = ".rbd.nvmeof.NamespaceID" - mdNamespaceUUID = ".rbd.nvmeof.NamespaceUUID" - mdHostNQN = ".rbd.nvmeof.HostNQN" + vcSubsystemNQN = "SubsystemNQN" + vcNamespaceID = "NamespaceID" + vcNamespaceUUID = "NamespaceUUID" + vcHostNQN = "HostNQN" // Listener info. - mdListenerAddress = ".rbd.nvmeof.ListenerAddress" - mdListenerPort = ".rbd.nvmeof.ListenerPort" - mdListenerHostname = ".rbd.nvmeof.ListenerHostname" + vcListenerAddress = "ListenerAddress" + vcListenerPort = "ListenerPort" + vcListenerHostname = "ListenerHostname" // Gateway management info. - mdGatewayAddress = ".rbd.nvmeof.GatewayAddress" - mdGatewayPort = ".rbd.nvmeof.GatewayPort" + vcGatewayAddress = "GatewayAddress" + vcGatewayPort = "GatewayPort" ) +// toRBDMetadataKey converts clean volume context key to prefixed RBD metadata key. +func toRBDMetadataKey(vcKey string) string { + return ".rbd.nvmeof." + vcKey +} + // populateVolumeContext adds NVMe-oF information to volume context for NodeServer. 
func populateVolumeContext(volume *csi.Volume, data *nvmeof.NVMeoFVolumeData) { if volume.VolumeContext == nil { @@ -401,15 +407,15 @@ func populateVolumeContext(volume *csi.Volume, data *nvmeof.NVMeoFVolumeData) { listenerPortStr := strconv.FormatUint(uint64(data.ListenerInfo.Port), 10) gatewayManagementInfoPortStr := strconv.FormatUint(uint64(data.GatewayManagementInfo.Port), 10) - volume.VolumeContext[mdSubsystemNQN] = data.SubsystemNQN - volume.VolumeContext[mdNamespaceID] = strconv.FormatUint(uint64(data.NamespaceID), 10) - volume.VolumeContext[mdNamespaceUUID] = data.NamespaceUUID - volume.VolumeContext[mdHostNQN] = data.HostNQN - volume.VolumeContext[mdListenerAddress] = data.ListenerInfo.Address - volume.VolumeContext[mdListenerPort] = listenerPortStr - volume.VolumeContext[mdListenerHostname] = data.ListenerInfo.Hostname - volume.VolumeContext[mdGatewayAddress] = data.GatewayManagementInfo.Address - volume.VolumeContext[mdGatewayPort] = gatewayManagementInfoPortStr + volume.VolumeContext[vcSubsystemNQN] = data.SubsystemNQN + volume.VolumeContext[vcNamespaceID] = strconv.FormatUint(uint64(data.NamespaceID), 10) + volume.VolumeContext[vcNamespaceUUID] = data.NamespaceUUID + volume.VolumeContext[vcHostNQN] = data.HostNQN + volume.VolumeContext[vcListenerAddress] = data.ListenerInfo.Address + volume.VolumeContext[vcListenerPort] = listenerPortStr + volume.VolumeContext[vcListenerHostname] = data.ListenerInfo.Hostname + volume.VolumeContext[vcGatewayAddress] = data.GatewayManagementInfo.Address + volume.VolumeContext[vcGatewayPort] = gatewayManagementInfoPortStr } // storeNVMeoFMetadata stores all NVMe-oF data in RBD volume metadata for cleanup operations. @@ -436,19 +442,19 @@ func (cs *Server) storeNVMeoFMetadata( // Prepare all metadata entries metadata := map[string]string{ // NVMe-oF resource info - mdSubsystemNQN: nvmeofData.SubsystemNQN, - mdNamespaceID: strconv.FormatUint(uint64(nvmeofData.NamespaceID), 10), - mdNamespaceUUID: nvmeofData.NamespaceUUID, - mdHostNQN: nvmeofData.HostNQN, + toRBDMetadataKey(vcSubsystemNQN): nvmeofData.SubsystemNQN, + toRBDMetadataKey(vcNamespaceID): strconv.FormatUint(uint64(nvmeofData.NamespaceID), 10), + toRBDMetadataKey(vcNamespaceUUID): nvmeofData.NamespaceUUID, + toRBDMetadataKey(vcHostNQN): nvmeofData.HostNQN, // Listener info - mdListenerAddress: nvmeofData.ListenerInfo.Address, - mdListenerPort: listenerInfoPortStr, - mdListenerHostname: nvmeofData.ListenerInfo.Hostname, + toRBDMetadataKey(vcListenerAddress): nvmeofData.ListenerInfo.Address, + toRBDMetadataKey(vcListenerPort): listenerInfoPortStr, + toRBDMetadataKey(vcListenerHostname): nvmeofData.ListenerInfo.Hostname, // Gateway management info - mdGatewayAddress: nvmeofData.GatewayManagementInfo.Address, - mdGatewayPort: gatewayManagementInfoPortStr, + toRBDMetadataKey(vcGatewayAddress): nvmeofData.GatewayManagementInfo.Address, + toRBDMetadataKey(vcGatewayPort): gatewayManagementInfoPortStr, } // Store all metadata entries @@ -493,15 +499,15 @@ func (cs *Server) getNVMeoFMetadata( // Required metadata keys requiredKeys := []string{ - mdSubsystemNQN, - mdNamespaceID, - mdNamespaceUUID, - mdHostNQN, - mdListenerAddress, - mdListenerPort, - mdListenerHostname, - mdGatewayAddress, - mdGatewayPort, + toRBDMetadataKey(vcSubsystemNQN), + toRBDMetadataKey(vcNamespaceID), + toRBDMetadataKey(vcNamespaceUUID), + toRBDMetadataKey(vcHostNQN), + toRBDMetadataKey(vcListenerAddress), + toRBDMetadataKey(vcListenerPort), + toRBDMetadataKey(vcListenerHostname), + toRBDMetadataKey(vcGatewayAddress), + 
toRBDMetadataKey(vcGatewayPort),
 	}

 	// Retrieve all metadata values
@@ -517,35 +523,35 @@
 	}

 	// Parse namespace ID
-	nsid, err := strconv.ParseUint(metadata[mdNamespaceID], 10, 32)
+	nsid, err := strconv.ParseUint(metadata[toRBDMetadataKey(vcNamespaceID)], 10, 32)
 	if err != nil {
 		return nil, fmt.Errorf("invalid namespace ID: %w", err)
 	}
-	listenerInfoPort, err := strconv.ParseUint(metadata[mdListenerPort], 10, 32)
+	listenerInfoPort, err := strconv.ParseUint(metadata[toRBDMetadataKey(vcListenerPort)], 10, 32)
 	if err != nil {
 		return nil, fmt.Errorf("invalid listener port: %w", err)
 	}
-	gatewayPort, err := strconv.ParseUint(metadata[mdGatewayPort], 10, 32)
+	gatewayPort, err := strconv.ParseUint(metadata[toRBDMetadataKey(vcGatewayPort)], 10, 32)
 	if err != nil {
 		return nil, fmt.Errorf("invalid gateway port: %w", err)
 	}
 	// Construct NVMe-oF volume data
 	nvmeofData := &nvmeof.NVMeoFVolumeData{
-		SubsystemNQN: metadata[mdSubsystemNQN],
+		SubsystemNQN:  metadata[toRBDMetadataKey(vcSubsystemNQN)],
 		NamespaceID:   uint32(nsid),
-		NamespaceUUID: metadata[mdNamespaceUUID],
-		HostNQN:      metadata[mdHostNQN],
+		NamespaceUUID: metadata[toRBDMetadataKey(vcNamespaceUUID)],
+		HostNQN:       metadata[toRBDMetadataKey(vcHostNQN)],
 		ListenerInfo: nvmeof.ListenerDetails{
 			GatewayAddress: nvmeof.GatewayAddress{
-				Address: metadata[mdListenerAddress],
+				Address: metadata[toRBDMetadataKey(vcListenerAddress)],
 				Port:    uint32(listenerInfoPort),
 			},
-			Hostname: metadata[mdListenerHostname],
+			Hostname: metadata[toRBDMetadataKey(vcListenerHostname)],
 		},
 		// Store gateway management info separately
 		GatewayManagementInfo: nvmeof.GatewayConfig{
-			Address: metadata[mdGatewayAddress],
+			Address: metadata[toRBDMetadataKey(vcGatewayAddress)],
 			Port:    uint32(gatewayPort),
 		},
 	}

From 99579fdb14a3b370758b78bc2eb452d90d5d360a Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Wed, 20 Aug 2025 14:40:37 +0300
Subject: [PATCH 026/157] nvmeof: move NVMeoFVolumeData to volume.go

Create a volume.go file; it will be in charge of the volume metadata
handling.

Signed-off-by: gadi-didi
---
 internal/nvmeof/nvmeof.go | 10 ----------
 internal/nvmeof/volume.go | 27 +++++++++++++++++++++++++++
 2 files changed, 27 insertions(+), 10 deletions(-)
 create mode 100644 internal/nvmeof/volume.go

diff --git a/internal/nvmeof/nvmeof.go b/internal/nvmeof/nvmeof.go
index 10acd039961..049815ac227 100644
--- a/internal/nvmeof/nvmeof.go
+++ b/internal/nvmeof/nvmeof.go
@@ -52,16 +52,6 @@ type ListenerDetails struct {
 // Config holds gateway client configuration.
 type GatewayConfig = GatewayAddress

-// NVMeoFVolumeData holds the data required for an NVMe-oF volume.
-type NVMeoFVolumeData struct {
-	SubsystemNQN          string
-	NamespaceID           uint32
-	NamespaceUUID         string
-	HostNQN               string
-	ListenerInfo          ListenerDetails
-	GatewayManagementInfo GatewayConfig
-}
-
 // GatewayClient wraps the gRPC client and connection details.
 type GatewayRpcClient struct {
 	config *GatewayConfig
diff --git a/internal/nvmeof/volume.go b/internal/nvmeof/volume.go
new file mode 100644
index 00000000000..e925ececb97
--- /dev/null
+++ b/internal/nvmeof/volume.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2025 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nvmeof
+
+// NVMeoFVolumeData holds the data required for an NVMe-oF volume.
+type NVMeoFVolumeData struct {
+	SubsystemNQN          string
+	NamespaceID           uint32
+	NamespaceUUID         string
+	HostNQN               string
+	ListenerInfo          ListenerDetails
+	GatewayManagementInfo GatewayConfig
+}

From 6b1c8a9b598405e804a62c94397890f0382d1c8d Mon Sep 17 00:00:00 2001
From: Skala Networks
Date: Thu, 21 Aug 2025 09:31:09 +0000
Subject: [PATCH 027/157] rbd: fix bug where volumereplication promote/demote
 would fail

If the source and destination clusters use different clusterIDs,
promoting/demoting a volumereplication would fail because the clusterID
mapping logic was not running. This fix enables clusterID mapping during
volume generation.

Signed-off-by: Skala Networks
---
 internal/rbd/group/util.go      | 6 ++++--
 internal/rbd/group/util_test.go | 5 +++++
 internal/rbd/rbd_util.go        | 2 ++
 internal/rbd/rbd_util_test.go   | 5 +++++
 internal/util/csiconfig.go      | 2 +-
 internal/util/errors.go         | 2 ++
 6 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/internal/rbd/group/util.go b/internal/rbd/group/util.go
index 3af9986be25..20e8d9cb1e3 100644
--- a/internal/rbd/group/util.go
+++ b/internal/rbd/group/util.go
@@ -434,8 +434,9 @@ func (cvg *commonVolumeGroup) GetCreationTime(ctx context.Context) (*time.Time,
 // volumegroups should continue based on the type of error encountered.
 //
 // It checks if the given error matches any of the following known errors:
-// - ErrPoolNotFound: The rbd pool where the volumegroup/omap is expected doesn't exist.
-// - ErrGroupNotFound: The volumegroup doesn't exist in the rbd pool.
+// - util.ErrPoolNotFound: The rbd pool where the volumegroup/omap is expected doesn't exist.
+// - util.ErrConfigNotFound: No configuration exists for the cluster ID associated with the volumegroup.
+// - rbderrors.ErrGroupNotFound: The volumegroup doesn't exist in the rbd pool.
 // - rados.ErrPermissionDenied: Permissions to access the pool is denied.
// // If any of these errors are encountered, the function returns `true`, indicating @@ -451,6 +452,7 @@ func ShouldRetryVolumeGroupGeneration(err error) bool { } // Continue searching for specific known errors return (errors.Is(err, util.ErrPoolNotFound) || + errors.Is(err, util.ErrConfigNotFound) || errors.Is(err, rbderrors.ErrGroupNotFound) || errors.Is(err, rados.ErrPermissionDenied)) } diff --git a/internal/rbd/group/util_test.go b/internal/rbd/group/util_test.go index f0837a23e41..49d1b8b922c 100644 --- a/internal/rbd/group/util_test.go +++ b/internal/rbd/group/util_test.go @@ -46,6 +46,11 @@ func Test_shouldRetryVolumeGroupGeneration(t *testing.T) { args: args{err: util.ErrPoolNotFound}, want: true, // Known error, continue searching }, + { + name: "ErrConfigNotFound (continue searching)", + args: args{err: util.ErrConfigNotFound}, + want: true, // Known error, continue searching + }, { name: "ErrRBDGroupNotFound (continue searching)", args: args{err: rbderrors.ErrGroupNotFound}, diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index 5119000efe4..c7d86766862 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -1260,6 +1260,7 @@ func generateVolumeFromVolumeID( // It checks if the given error matches any of the following known errors: // - util.ErrKeyNotFound: The key required to locate the volume is missing in Rados omap. // - util.ErrPoolNotFound: The rbd pool where the volume/omap is expected doesn't exist. +// - util.ErrConfigNotFound: No configuration exists for the cluster ID associated with the volume. // - ErrImageNotFound: The image doesn't exist in the rbd pool. // - rados.ErrPermissionDenied: Permissions to access the pool is denied. // @@ -1277,6 +1278,7 @@ func ShouldRetryVolumeGeneration(err error) bool { // Continue searching for specific known errors return (errors.Is(err, util.ErrKeyNotFound) || errors.Is(err, util.ErrPoolNotFound) || + errors.Is(err, util.ErrConfigNotFound) || errors.Is(err, rbderrors.ErrImageNotFound) || errors.Is(err, rados.ErrPermissionDenied)) } diff --git a/internal/rbd/rbd_util_test.go b/internal/rbd/rbd_util_test.go index f75bef83cc9..f66bb4653af 100644 --- a/internal/rbd/rbd_util_test.go +++ b/internal/rbd/rbd_util_test.go @@ -414,6 +414,11 @@ func Test_shouldRetryVolumeGeneration(t *testing.T) { args: args{err: util.ErrPoolNotFound}, want: true, // Known error, continue searching }, + { + name: "ErrConfigNotFound (continue searching)", + args: args{err: util.ErrConfigNotFound}, + want: true, // Known error, continue searching + }, { name: "ErrImageNotFound (continue searching)", args: args{err: rbderrors.ErrImageNotFound}, diff --git a/internal/util/csiconfig.go b/internal/util/csiconfig.go index cce3c81f31a..1effa9caa69 100644 --- a/internal/util/csiconfig.go +++ b/internal/util/csiconfig.go @@ -83,7 +83,7 @@ func readClusterInfo(pathToConfig, clusterID string) (*kubernetes.ClusterInfo, e } } - return nil, fmt.Errorf("missing configuration for cluster ID %q", clusterID) + return nil, fmt.Errorf("%w: %q", ErrConfigNotFound, clusterID) } // Mons returns a comma separated MON list from the csi config for the given clusterID. diff --git a/internal/util/errors.go b/internal/util/errors.go index ed821769a07..ab0f81e97f5 100644 --- a/internal/util/errors.go +++ b/internal/util/errors.go @@ -34,4 +34,6 @@ var ( ErrClusterIDNotSet = errors.New("clusterID must be set") // ErrMissingConfigForMonitor is returned when clusterID is not found for the mon. 
ErrMissingConfigForMonitor = errors.New("missing configuration of cluster ID for monitor") + // ErrConfigNotFound is returned when no configuration is found for a cluster ID. + ErrConfigNotFound = errors.New("missing configuration for cluster ID") ) From fb82d701c790360ecc65f675135703435091c95c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 03:35:20 +0000 Subject: [PATCH 028/157] rebase: bump docker/login-action from 3.4.0 to 3.5.0 Bumps [docker/login-action](https://github.com/docker/login-action) from 3.4.0 to 3.5.0. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/74a5d142397b4f367a81961eba4e8cd7edddf772...184bdaa0721073962dff0199f1fb9940f07167d1) --- updated-dependencies: - dependency-name: docker/login-action dependency-version: 3.5.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/publish-artifacts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-artifacts.yaml b/.github/workflows/publish-artifacts.yaml index ef8e272b5d7..4770a9d0989 100644 --- a/.github/workflows/publish-artifacts.yaml +++ b/.github/workflows/publish-artifacts.yaml @@ -23,7 +23,7 @@ jobs: - name: Login to Quay # yamllint disable-line rule:line-length - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0 with: registry: quay.io username: ${{ secrets.QUAY_IO_USERNAME }} From 0be36b736608ceb92bee3d89060995b46b5b8737 Mon Sep 17 00:00:00 2001 From: gadi-didi Date: Wed, 27 Aug 2025 13:10:54 +0300 Subject: [PATCH 029/157] util: added validate methods added basic ValidateControllerPublishVolumeRequest and ValidateControllerUnpublishVolumeRequest Signed-off-by: gadi-didi --- internal/util/validate.go | 45 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/internal/util/validate.go b/internal/util/validate.go index dbd8fabfd7d..2fc77a06c50 100644 --- a/internal/util/validate.go +++ b/internal/util/validate.go @@ -1,3 +1,19 @@ +/* +Copyright 2025 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package util import ( @@ -6,6 +22,35 @@ import ( "google.golang.org/grpc/status" ) +// ValidateControllerPublishVolumeRequest validates the controller publish request. +func ValidateControllerPublishVolumeRequest(req *csi.ControllerPublishVolumeRequest) error { + if req.GetVolumeId() == "" { + return status.Error(codes.InvalidArgument, "volume ID missing in request") + } + + if req.GetVolumeCapability() == nil { + return status.Error(codes.InvalidArgument, "volume capability missing in request") + } + if req.GetNodeId() == "" { + return status.Error(codes.InvalidArgument, "node ID missing in request") + } + + return nil +} + +// ValidateControllerUnpublishVolumeRequest validates the controller unpublish request. 
+func ValidateControllerUnpublishVolumeRequest(req *csi.ControllerUnpublishVolumeRequest) error {
+	if req.GetVolumeId() == "" {
+		return status.Error(codes.InvalidArgument, "volume ID missing in request")
+	}
+
+	if req.GetNodeId() == "" {
+		return status.Error(codes.InvalidArgument, "node ID missing in request")
+	}
+
+	return nil
+}
+
 // ValidateNodeStageVolumeRequest validates the node stage request.
 func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
 	if req.GetVolumeCapability() == nil {

From a13bfc0ab3ba9a112ae0433d34fd94051c8731ff Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Wed, 27 Aug 2025 13:36:27 +0300
Subject: [PATCH 030/157] nvmeof: remove AddHost

remove the AddHost GRPC call and remove the HostNQN field from the
NVMeoFVolumeData struct

Signed-off-by: gadi-didi
---
 .../nvmeof/controller/controllerserver.go | 26 +++----------
 internal/nvmeof/tests/nvmeof_test.go      |  9 +++----
 internal/nvmeof/volume.go                 |  1 -
 3 files changed, 8 insertions(+), 28 deletions(-)

diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
index 2f3c831bf7f..c3e69bcf5bf 100644
--- a/internal/nvmeof/controller/controllerserver.go
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -168,7 +168,7 @@ func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
 	// Validate required parameters
 	params := req.GetParameters()
 	requiredParams := []string{
-		"subsystemNQN", "hostNQN", "nvmeofGatewayAddress", "nvmeofGatewayPort",
+		"subsystemNQN", "nvmeofGatewayAddress", "nvmeofGatewayPort",
 		"listenerIpAddress", "listenerPort", "listenerHostname",
 	}
 	for _, param := range requiredParams {
@@ -255,7 +255,6 @@ func createNVMeoFResources(
 		SubsystemNQN:  params["subsystemNQN"],
 		NamespaceID:   0,  // will be set after namespace creation,
 		NamespaceUUID: "", // will be set after namespace creation
-		HostNQN:       params["hostNQN"],
 		ListenerInfo: nvmeof.ListenerDetails{
 			GatewayAddress: nvmeof.GatewayAddress{
 				Address: params["listenerIpAddress"],
@@ -311,12 +310,6 @@ func createNVMeoFResources(
 	}
 	nvmeofData.NamespaceUUID = uuid
 
-	// Step 6: Add host to subsystem
-	if err := gateway.AddHost(ctx, nvmeofData.SubsystemNQN, nvmeofData.HostNQN); err != nil {
-		return nil, fmt.Errorf("host addition failed: %w", err)
-	}
-	log.DebugLog(ctx, "Host added: %s to subsystem %s", nvmeofData.HostNQN, nvmeofData.SubsystemNQN)
-
 	return nvmeofData, nil
 }
 
@@ -345,21 +338,14 @@ func cleanupNVMeoFResources(
 	// there is no reason to continue; should we make this simpler,
 	// instead of checking the returned error text for "not found"?
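 	// A sketch of how idempotent cleanup could look (isNotFound is a
 	// hypothetical helper, not part of this change; the gateway client's
 	// actual error surface may differ):
 	//
 	//	if err := gateway.DeleteNamespace(ctx, subsystemNQN, nsID); err != nil && !isNotFound(err) {
 	//		return fmt.Errorf("failed to delete namespace %d: %w", nsID, err)
 	//	}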
- // Step 2: Remove host from subsystem - if err := gateway.RemoveHost(ctx, nvmeofData.SubsystemNQN, nvmeofData.HostNQN); err != nil { - return fmt.Errorf("failed to remove host %s from subsystem %s: %w", - nvmeofData.HostNQN, nvmeofData.SubsystemNQN, err) - } - log.DebugLog(ctx, "Host %s removed from subsystem %s", nvmeofData.HostNQN, nvmeofData.SubsystemNQN) - - // Step 3: Delete namespace + // Step 2: Delete namespace if err := gateway.DeleteNamespace(ctx, nvmeofData.SubsystemNQN, nvmeofData.NamespaceID); err != nil { return fmt.Errorf("failed to delete namespace %d for subsystem %s: %w", nvmeofData.NamespaceID, nvmeofData.SubsystemNQN, err) } log.DebugLog(ctx, "Namespace %d deleted for subsystem %s", nvmeofData.NamespaceID, nvmeofData.SubsystemNQN) - // Step 4: Delete listener + // Step 3: Delete listener err = gateway.DeleteListener(ctx, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo) if err != nil { return fmt.Errorf("failed to delete listener for subsystem %s: %w", nvmeofData.SubsystemNQN, err) @@ -367,7 +353,7 @@ func cleanupNVMeoFResources( log.DebugLog(ctx, "Listener deleted for subsystem %s at %s", nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo) - // Step 5: Cleanup empty subsystem + // Step 4: Cleanup empty subsystem if err := cleanupEmptySubsystem(ctx, gateway, nvmeofData.SubsystemNQN); err != nil { return fmt.Errorf("failed to cleanup empty subsystem %s: %w", nvmeofData.SubsystemNQN, err) } @@ -410,7 +396,6 @@ func populateVolumeContext(volume *csi.Volume, data *nvmeof.NVMeoFVolumeData) { volume.VolumeContext[vcSubsystemNQN] = data.SubsystemNQN volume.VolumeContext[vcNamespaceID] = strconv.FormatUint(uint64(data.NamespaceID), 10) volume.VolumeContext[vcNamespaceUUID] = data.NamespaceUUID - volume.VolumeContext[vcHostNQN] = data.HostNQN volume.VolumeContext[vcListenerAddress] = data.ListenerInfo.Address volume.VolumeContext[vcListenerPort] = listenerPortStr volume.VolumeContext[vcListenerHostname] = data.ListenerInfo.Hostname @@ -445,7 +430,6 @@ func (cs *Server) storeNVMeoFMetadata( toRBDMetadataKey(vcSubsystemNQN): nvmeofData.SubsystemNQN, toRBDMetadataKey(vcNamespaceID): strconv.FormatUint(uint64(nvmeofData.NamespaceID), 10), toRBDMetadataKey(vcNamespaceUUID): nvmeofData.NamespaceUUID, - toRBDMetadataKey(vcHostNQN): nvmeofData.HostNQN, // Listener info toRBDMetadataKey(vcListenerAddress): nvmeofData.ListenerInfo.Address, @@ -502,7 +486,6 @@ func (cs *Server) getNVMeoFMetadata( toRBDMetadataKey(vcSubsystemNQN), toRBDMetadataKey(vcNamespaceID), toRBDMetadataKey(vcNamespaceUUID), - toRBDMetadataKey(vcHostNQN), toRBDMetadataKey(vcListenerAddress), toRBDMetadataKey(vcListenerPort), toRBDMetadataKey(vcListenerHostname), @@ -541,7 +524,6 @@ func (cs *Server) getNVMeoFMetadata( SubsystemNQN: metadata[toRBDMetadataKey(vcSubsystemNQN)], NamespaceID: uint32(nsid), NamespaceUUID: metadata[toRBDMetadataKey(vcNamespaceUUID)], - HostNQN: metadata[toRBDMetadataKey(vcHostNQN)], ListenerInfo: nvmeof.ListenerDetails{ GatewayAddress: nvmeof.GatewayAddress{ Address: metadata[toRBDMetadataKey(vcListenerAddress)], diff --git a/internal/nvmeof/tests/nvmeof_test.go b/internal/nvmeof/tests/nvmeof_test.go index 21f1013d1bd..da5fdf3cd63 100644 --- a/internal/nvmeof/tests/nvmeof_test.go +++ b/internal/nvmeof/tests/nvmeof_test.go @@ -78,7 +78,6 @@ func TestRealGateway(t *testing.T) { nvmeofData := &nvmeof.NVMeoFVolumeData{ SubsystemNQN: testNQN, NamespaceID: 0, // will be set after namespace creation - HostNQN: hostNQN, ListenerInfo: nvmeof.ListenerDetails{ GatewayAddress: nvmeof.GatewayAddress{ 
 				Address: config.Address,
@@ -119,9 +118,9 @@ func TestRealGateway(t *testing.T) {
 	t.Logf("✓ Subsystem created: %s", nvmeofData.SubsystemNQN)
 
 	// Test add host
-	err = client.AddHost(ctx, nvmeofData.SubsystemNQN, nvmeofData.HostNQN)
+	err = client.AddHost(ctx, nvmeofData.SubsystemNQN, hostNQN)
 	require.NoError(t, err)
-	t.Logf("✓ Host added: %s to subsystem %s", nvmeofData.HostNQN, nvmeofData.SubsystemNQN)
+	t.Logf("✓ Host added: %s to subsystem %s", hostNQN, nvmeofData.SubsystemNQN)
 
 	// Test check subsystem exists
 	exists, err := client.SubsystemExists(ctx, nvmeofData.SubsystemNQN)
@@ -140,9 +139,9 @@ func TestRealGateway(t *testing.T) {
 	t.Logf("✓ Listener deleted for subsystem %s at %s", testNQN, config)
 
 	// Test remove host
-	err = client.RemoveHost(ctx, testNQN, nvmeofData.HostNQN)
+	err = client.RemoveHost(ctx, testNQN, hostNQN)
 	require.NoError(t, err)
-	t.Logf("✓ Host removed: %s from subsystem %s", nvmeofData.HostNQN, testNQN)
+	t.Logf("✓ Host removed: %s from subsystem %s", hostNQN, testNQN)
 
 	// Test create namespace
 	// poolName := "mypool"
diff --git a/internal/nvmeof/volume.go b/internal/nvmeof/volume.go
index e925ececb97..694a67e60e6 100644
--- a/internal/nvmeof/volume.go
+++ b/internal/nvmeof/volume.go
@@ -21,7 +21,6 @@ type NVMeoFVolumeData struct {
 	SubsystemNQN          string
 	NamespaceID           uint32
 	NamespaceUUID         string
-	HostNQN               string
 	ListenerInfo          ListenerDetails
 	GatewayManagementInfo GatewayConfig
 }

From be5fdfc615da9991d819a7e1d0523e6ac4845eb6 Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Thu, 28 Aug 2025 15:31:15 +0300
Subject: [PATCH 031/157] nvmeof: change getNVMeoFMetadata argument

instead of getting the DeleteVolumeRequest, getNVMeoFMetadata gets the
secrets itself.

Signed-off-by: gadi-didi
---
 internal/nvmeof/controller/controllerserver.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
index c3e69bcf5bf..df3d19fa61a 100644
--- a/internal/nvmeof/controller/controllerserver.go
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -146,7 +146,7 @@ func (cs *Server) DeleteVolume(
 	defer cs.volumeLocks.Release(volumeID)
 
 	// Get NVMe-oF metadata for cleanup
-	nvmeofData, err := cs.getNVMeoFMetadata(ctx, req, volumeID)
+	nvmeofData, err := cs.getNVMeoFMetadata(ctx, req.GetSecrets(), volumeID)
 	if err != nil {
 		log.DebugLog(ctx, "No NVMe-oF metadata found, skipping NVMe-oF cleanup: %v", err)
 	} else {
@@ -464,11 +464,11 @@ func (cs *Server) storeNVMeoFMetadata(
 // getNVMeoFMetadata retrieves all NVMe-oF data from RBD volume metadata.
 func (cs *Server) getNVMeoFMetadata(
 	ctx context.Context,
-	req *csi.DeleteVolumeRequest,
+	secrets map[string]string,
 	volumeID string,
 ) (*nvmeof.NVMeoFVolumeData, error) {
 	// Create RBD manager
-	mgr := rbdutil.NewManager(cs.backendServer.Driver.GetInstanceID(), nil, req.GetSecrets())
+	mgr := rbdutil.NewManager(cs.backendServer.Driver.GetInstanceID(), nil, secrets)
 	defer mgr.Destroy(ctx)
 
 	// Get RBD volume

From 02ac22b774fc02fcaa67b6f47f204ca0e6d2d7f9 Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Thu, 28 Aug 2025 15:35:07 +0300
Subject: [PATCH 032/157] nvmeof: add Controller[Un]PublishVolume

implement adding a host (the hostNQN is retrieved from the nodeID) via
the AddHost GRPC call.
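
For reviewers, a standalone sketch of the nodeID convention this change
relies on (it mirrors extractHostNQNFromNodeID below; the example node
name and NQN values are made up):

	package main

	import (
		"fmt"
		"strings"
	)

	// hostNQNFromNodeID splits a node ID of the form "<nodeName>::<hostNQN>"
	// and returns the host NQN that is handed to gateway.AddHost/RemoveHost.
	func hostNQNFromNodeID(nodeID string) (string, error) {
		parts := strings.Split(nodeID, "::")
		if len(parts) != 2 {
			return "", fmt.Errorf("invalid nodeID format: %s", nodeID)
		}

		return parts[1], nil
	}

	func main() {
		nqn, err := hostNQNFromNodeID("worker-1::nqn.2014-08.org.nvmexpress:uuid:1b4e28ba-2fa1-11d2-883f-0016d3cca427")
		fmt.Println(nqn, err)
	}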
Signed-off-by: gadi-didi
---
 .../nvmeof/controller/controllerserver.go | 200 ++++++++++++++++++
 1 file changed, 200 insertions(+)

diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
index df3d19fa61a..36ebddae75b 100644
--- a/internal/nvmeof/controller/controllerserver.go
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"strconv"
+	"strings"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -32,6 +33,7 @@ import (
 	rbdutil "github.com/ceph/ceph-csi/internal/rbd"
 	rbddriver "github.com/ceph/ceph-csi/internal/rbd/driver"
 	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/k8s"
 	"github.com/ceph/ceph-csi/internal/util/log"
 )
 
@@ -162,6 +164,84 @@ func (cs *Server) DeleteVolume(
 	return cs.backendServer.DeleteVolume(ctx, req)
 }
 
+// ControllerPublishVolume handles the publishing of a volume (runs the
+// AddHost gRPC call).
+func (cs *Server) ControllerPublishVolume(
+	ctx context.Context,
+	req *csi.ControllerPublishVolumeRequest,
+) (*csi.ControllerPublishVolumeResponse, error) {
+	// Validate request
+	if err := validatePublishVolumeRequest(req); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "request validation failed: %v", err)
+	}
+
+	volumeID := req.GetVolumeId()
+	if acquired := cs.volumeLocks.TryAcquire(volumeID); !acquired {
+		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
+	}
+	defer cs.volumeLocks.Release(volumeID)
+
+	// Publish NVMe-oF resources
+	hostNqn, err := publishResources(ctx, req)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to publish resources: %v", err)
+	}
+
+	// Populate publish context
+	publishContext := populatePublishContext(req, hostNqn)
+
+	return &csi.ControllerPublishVolumeResponse{
+		PublishContext: publishContext,
+	}, nil
+}
+
+// ControllerUnpublishVolume removes the host from the NVMe-oF subsystem
+// (runs the RemoveHost gRPC call).
+func (cs *Server) ControllerUnpublishVolume(
+	ctx context.Context,
+	req *csi.ControllerUnpublishVolumeRequest,
+) (*csi.ControllerUnpublishVolumeResponse, error) {
+	// Validate request
+	if err := util.ValidateControllerUnpublishVolumeRequest(req); err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "request validation failed: %v", err)
+	}
+
+	volumeID := req.GetVolumeId()
+	nodeID := req.GetNodeId()
+	if acquired := cs.volumeLocks.TryAcquire(volumeID); !acquired {
+		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
+	}
+	defer cs.volumeLocks.Release(volumeID)
+
+	// Since ControllerUnpublishVolume doesn't receive volume context,
+	// we need to retrieve it from the volume metadata stored during CreateVolume
+	secrets := req.GetSecrets()
+	if secrets == nil {
+		secretName, secretNamespace, err := util.GetControllerPublishSecretRef(req.GetVolumeId(), util.RBDType)
+		if err != nil {
+			log.WarningLog(ctx, "controller publish secret not found: %v", err)
+
+			return &csi.ControllerUnpublishVolumeResponse{}, nil
+		}
+
+		secrets, err = k8s.GetSecret(secretName, secretNamespace)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get controller publish secret from k8s: %w", err)
+		}
+	}
+
+	nvmeofData, err := cs.getNVMeoFMetadata(ctx, secrets, volumeID)
+	if err != nil {
+		log.ErrorLog(ctx, "failed to get NVMe-oF metadata for volumeID %s: %v", volumeID, err)
+
+		return nil, status.Errorf(codes.Internal, "failed to get NVMe-oF metadata: %v", err)
+	}
+
+	// Unpublish NVMe-oF resources
+	if err := unpublishResources(ctx, nvmeofData, nodeID); err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to unpublish resources: %v", err)
+	}
+
+	return &csi.ControllerUnpublishVolumeResponse{}, nil
+}
+
 // validateCreateVolumeRequest validates the incoming request for nvmeof.
 // the rest of the parameters are validated by RBD.
 func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
@@ -180,6 +260,22 @@ func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
 	return nil
 }
 
+// validatePublishVolumeRequest validates the incoming request for publishing a volume.
+func validatePublishVolumeRequest(req *csi.ControllerPublishVolumeRequest) error {
+	volumeContext := req.GetVolumeContext()
+	requiredParams := []string{
+		"subsystemNQN", "nvmeofGatewayAddress", "nvmeofGatewayPort",
+		"listenerIpAddress", "listenerPort", "listenerHostname",
+	}
+	for _, param := range requiredParams {
+		if volumeContext[param] == "" {
+			return fmt.Errorf("missing required parameter: %s", param)
+		}
+	}
+
+	return util.ValidateControllerPublishVolumeRequest(req)
+}
+
 // ensureSubsystem checks if the subsystem exists, and creates it if not.
 func ensureSubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient, subsystemNQN string) error {
 	exists, err := gateway.SubsystemExists(ctx, subsystemNQN)
@@ -362,6 +458,99 @@ func cleanupNVMeoFResources(
 	return nil
 }
 
+// publishResources adds the host NQN to the subsystem so the node is allowed to see the volume.
+func publishResources(ctx context.Context,
+	req *csi.ControllerPublishVolumeRequest,
+) (string, error) {
+	nodeID := req.GetNodeId()
+	hostNQN, err := extractHostNQNFromNodeID(nodeID)
+	if err != nil {
+		return "", status.Errorf(codes.InvalidArgument, "invalid nodeID format: %v", err)
+	}
+	// Get volume context from the volume (contains subsystem info from CreateVolume)
+	volumeContext := req.GetVolumeContext()
+	subsystemNQN := volumeContext[vcSubsystemNQN]
+	gatewayAddr := volumeContext[vcGatewayAddress]
+	gatewayPortStr := volumeContext[vcGatewayPort]
+
+	// Convert gateway port from string to uint32
+	gatewayPort, err := strconv.ParseUint(gatewayPortStr, 10, 32)
+	if err != nil {
+		return "", fmt.Errorf("invalid gateway port %s: %w", gatewayPortStr, err)
+	}
+	// Connect to gateway and add host
+	config := &nvmeof.GatewayConfig{
+		Address: gatewayAddr,
+		Port:    uint32(gatewayPort),
+	}
+	gateway, err := connectGateway(ctx, config)
+	if err != nil {
+		return "", fmt.Errorf("gateway connection failed: %w", err)
+	}
+	defer func() {
+		if closeErr := gateway.Destroy(); closeErr != nil {
+			log.ErrorLog(ctx, "Warning: failed to close gateway connection: %v", closeErr)
+		}
+	}()
+
+	// Add host to subsystem
+	if err := gateway.AddHost(ctx, subsystemNQN, hostNQN); err != nil {
+		return "", fmt.Errorf("failed to add host %s: %w", hostNQN, err)
+	}
+
+	log.DebugLog(ctx, "Host %s successfully added to subsystem %s", hostNQN, subsystemNQN)
+
+	return hostNQN, nil
+}
+
+// unpublishResources removes the host from the NVMe-oF subsystem.
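+// A hedged usage sketch from the unpublish path (identifiers as introduced
+// by this patch; error handling elided):
+//
+//	nvmeofData, _ := cs.getNVMeoFMetadata(ctx, req.GetSecrets(), volumeID)
+//	_ = unpublishResources(ctx, nvmeofData, req.GetNodeId())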
+func unpublishResources(ctx context.Context, data *nvmeof.NVMeoFVolumeData, nodeID string) error {
+	// Extract host NQN from nodeID
+	hostNQN, err := extractHostNQNFromNodeID(nodeID)
+	if err != nil {
+		return fmt.Errorf("invalid nodeID format: %w", err)
+	}
+	subsystemNQN := data.SubsystemNQN
+	gatewayAddr := data.GatewayManagementInfo.Address
+	gatewayPort := data.GatewayManagementInfo.Port
+
+	// Connect to gateway and remove host
+	config := &nvmeof.GatewayConfig{
+		Address: gatewayAddr,
+		Port:    gatewayPort,
+	}
+	gateway, err := connectGateway(ctx, config)
+	if err != nil {
+		return fmt.Errorf("gateway connection failed: %w", err)
+	}
+	defer func() {
+		if closeErr := gateway.Destroy(); closeErr != nil {
+			log.ErrorLog(ctx, "Warning: failed to close gateway connection: %v", closeErr)
+		}
+	}()
+
+	// Remove host from subsystem
+	if err := gateway.RemoveHost(ctx, subsystemNQN, hostNQN); err != nil {
+		return fmt.Errorf("failed to remove host %s from subsystem %s: %w",
+			hostNQN, subsystemNQN, err)
+	}
+	log.DebugLog(ctx, "Host %s removed from subsystem %s", hostNQN, subsystemNQN)
+
+	return nil
+}
+
+// extractHostNQNFromNodeID extracts the host NQN from the node ID.
+func extractHostNQNFromNodeID(nodeID string) (string, error) {
+	// the nodeID has the pattern: <nodeName>::<hostNQN>
+	// TODO: maybe change this separator to a rarer one.
+	parts := strings.Split(nodeID, "::")
+	if len(parts) != 2 {
+		return "", fmt.Errorf("invalid nodeID format: %s", nodeID)
+	}
+
+	return parts[1], nil
+}
+
 // VolumeContext metadata keys.
 const (
 	// NVMe-oF resource info.
@@ -403,6 +592,17 @@ func populateVolumeContext(volume *csi.Volume, data *nvmeof.NVMeoFVolumeData) {
 	volume.VolumeContext[vcGatewayPort] = gatewayManagementInfoPortStr
 }
 
+// populatePublishContext creates a publish context for the volume.
+func populatePublishContext(req *csi.ControllerPublishVolumeRequest, hostNqn string) map[string]string {
+	publishContext := make(map[string]string)
+	for key, value := range req.GetVolumeContext() {
+		publishContext[key] = value
+	}
+	publishContext[vcHostNQN] = hostNqn
+
+	return publishContext
+}
+
 // storeNVMeoFMetadata stores all NVMe-oF data in RBD volume metadata for cleanup operations.
 func (cs *Server) storeNVMeoFMetadata(
 	ctx context.Context,

From cfb6d8704e5bd7b188a5148226c94bf5b237300e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 19 Aug 2025 06:29:20 +0000
Subject: [PATCH 033/157] rebase: bump actions/dependency-review-action from
 4.7.1 to 4.7.2

Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.1 to 4.7.2.
- [Release notes](https://github.com/actions/dependency-review-action/releases)
- [Commits](https://github.com/actions/dependency-review-action/compare/da24556b548a50705dd671f47852072ea4c105d9...bc41886e18ea39df68b1b1245f4184881938e050)

---
updated-dependencies:
- dependency-name: actions/dependency-review-action
  dependency-version: 4.7.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yaml b/.github/workflows/dependency-review.yaml index 964836fbeb1..0b23ac66377 100644 --- a/.github/workflows/dependency-review.yaml +++ b/.github/workflows/dependency-review.yaml @@ -25,6 +25,6 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: 'Dependency Review' # yamllint disable-line rule:line-length - uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1 + uses: actions/dependency-review-action@bc41886e18ea39df68b1b1245f4184881938e050 # v4.7.2 with: allow-ghsas: GHSA-f4w6-3rh6-6q4q From f15046e2f2c6347c9914cdd4cbd1b11af33363d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 05:25:45 +0000 Subject: [PATCH 034/157] rebase: bump the k8s-dependencies group in /e2e with 4 updates Bumps the k8s-dependencies group in /e2e with 4 updates: [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery), [k8s.io/cloud-provider](https://github.com/kubernetes/cloud-provider), [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) and [k8s.io/pod-security-admission](https://github.com/kubernetes/pod-security-admission). Updates `k8s.io/apimachinery` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.33.3...v0.33.4) Updates `k8s.io/cloud-provider` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/cloud-provider/compare/v0.33.3...v0.33.4) Updates `k8s.io/kubernetes` from 1.33.3 to 1.33.4 - [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.33.3...v1.33.4) Updates `k8s.io/pod-security-admission` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/pod-security-admission/compare/v0.33.3...v0.33.4) --- updated-dependencies: - dependency-name: k8s.io/apimachinery dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-dependencies - dependency-name: k8s.io/cloud-provider dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-dependencies - dependency-name: k8s.io/kubernetes dependency-version: 1.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-dependencies - dependency-name: k8s.io/pod-security-admission dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-dependencies ... 
Signed-off-by: dependabot[bot] --- e2e/go.mod | 64 +- e2e/go.sum | 120 ++-- e2e/vendor/cel.dev/expr/eval.pb.go | 361 +++++----- .../prometheus/internal/difflib.go | 4 +- .../client_golang/prometheus/metric.go | 25 +- .../prometheus/process_collector_darwin.go | 6 +- .../process_collector_mem_nocgo_darwin.go | 2 +- .../process_collector_procfsenabled.go | 8 +- .../prometheus/promhttp/instrument_server.go | 2 +- .../client_golang/prometheus/vec.go | 10 +- .../client_golang/prometheus/wrap.go | 36 +- .../prometheus/common/expfmt/text_parse.go | 4 +- .../prometheus/common/model/alert.go | 2 +- .../prometheus/common/model/labels.go | 5 +- .../prometheus/common/model/metric.go | 28 +- .../prometheus/common/model/time.go | 25 +- .../prometheus/procfs/.golangci.yml | 63 +- .../prometheus/procfs/Makefile.common | 10 +- .../github.com/prometheus/procfs/README.md | 6 +- .../github.com/prometheus/procfs/arp.go | 4 +- e2e/vendor/github.com/prometheus/procfs/fs.go | 10 +- .../prometheus/procfs/fs_statfs_notype.go | 4 +- .../github.com/prometheus/procfs/fscache.go | 6 +- .../prometheus/procfs/internal/fs/fs.go | 3 + .../prometheus/procfs/internal/util/parse.go | 14 + .../procfs/internal/util/sysreadfile.go | 20 + .../prometheus/procfs/mountstats.go | 27 +- .../prometheus/procfs/net_dev_snmp6.go | 96 +++ .../prometheus/procfs/net_ip_socket.go | 8 +- .../prometheus/procfs/net_protocols.go | 21 +- .../github.com/prometheus/procfs/net_tcp.go | 4 + .../github.com/prometheus/procfs/net_unix.go | 8 +- .../github.com/prometheus/procfs/proc.go | 8 +- .../prometheus/procfs/proc_cgroup.go | 2 +- .../github.com/prometheus/procfs/proc_io.go | 2 +- .../prometheus/procfs/proc_netstat.go | 224 +++---- .../prometheus/procfs/proc_smaps.go | 4 +- .../github.com/prometheus/procfs/proc_snmp.go | 120 ++-- .../prometheus/procfs/proc_snmp6.go | 150 ++--- .../prometheus/procfs/proc_status.go | 18 +- .../github.com/prometheus/procfs/proc_sys.go | 2 +- .../github.com/prometheus/procfs/softirqs.go | 22 +- .../go.opentelemetry.io/otel/.golangci.yml | 452 +++++++------ .../go.opentelemetry.io/otel/CHANGELOG.md | 54 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 1 + e2e/vendor/go.opentelemetry.io/otel/Makefile | 19 +- e2e/vendor/go.opentelemetry.io/otel/README.md | 8 +- .../go.opentelemetry.io/otel/RELEASING.md | 18 + .../otel/attribute/filter.go | 4 +- .../internal}/attribute.go | 2 +- .../otel/attribute/rawhelpers.go | 37 + .../otel/attribute/value.go | 15 +- .../otel/dependencies.Dockerfile | 5 +- .../go.opentelemetry.io/otel/get_main_pkgs.sh | 30 - .../go.opentelemetry.io/otel/internal/gen.go | 18 - .../otel/internal/global/handler.go | 1 + .../otel/internal/global/meter.go | 45 +- .../otel/internal/global/trace.go | 13 +- .../otel/internal/rawhelpers.go | 48 -- .../otel/metric/asyncfloat64.go | 12 +- .../otel/metric/asyncint64.go | 8 +- .../otel/metric/instrument.go | 16 +- .../go.opentelemetry.io/otel/metric/meter.go | 10 +- .../otel/metric/noop/noop.go | 25 +- .../otel/propagation/baggage.go | 36 +- .../otel/propagation/propagation.go | 30 +- .../go.opentelemetry.io/otel/renovate.json | 7 +- .../otel/sdk/internal/env/env.go | 2 + .../otel/sdk/trace/id_generator.go | 26 +- .../otel/sdk/trace/provider.go | 12 +- .../otel/sdk/trace/tracer.go | 13 +- .../go.opentelemetry.io/otel/sdk/version.go | 3 +- .../go.opentelemetry.io/otel/trace/auto.go | 5 +- .../otel/trace/internal/telemetry/span.go | 56 +- .../otel/trace/internal/telemetry/status.go | 12 +- .../otel/trace/internal/telemetry/traces.go | 4 +- 
.../otel/trace/internal/telemetry/value.go | 2 +- .../go.opentelemetry.io/otel/trace/noop.go | 2 + .../otel/verify_readmes.sh | 21 - .../go.opentelemetry.io/otel/version.go | 2 +- .../go.opentelemetry.io/otel/versions.yaml | 8 +- e2e/vendor/golang.org/x/net/http2/http2.go | 2 - .../golang.org/x/oauth2/internal/doc.go | 2 +- .../golang.org/x/oauth2/internal/oauth2.go | 2 +- .../golang.org/x/oauth2/internal/token.go | 50 +- .../golang.org/x/oauth2/internal/transport.go | 4 +- e2e/vendor/golang.org/x/oauth2/oauth2.go | 55 +- e2e/vendor/golang.org/x/oauth2/pkce.go | 15 +- e2e/vendor/golang.org/x/oauth2/token.go | 15 +- e2e/vendor/golang.org/x/oauth2/transport.go | 24 +- e2e/vendor/golang.org/x/sys/unix/mkerrors.sh | 3 + .../golang.org/x/sys/unix/syscall_darwin.go | 56 +- .../golang.org/x/sys/unix/zerrors_linux.go | 44 +- .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + .../x/sys/unix/zerrors_linux_loong64.go | 2 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + .../golang.org/x/sys/unix/ztypes_linux.go | 37 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 30 +- .../x/sys/unix/ztypes_linux_amd64.go | 28 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 32 +- .../x/sys/unix/ztypes_linux_arm64.go | 28 +- .../x/sys/unix/ztypes_linux_loong64.go | 28 +- .../x/sys/unix/ztypes_linux_mips.go | 30 +- .../x/sys/unix/ztypes_linux_mips64.go | 28 +- .../x/sys/unix/ztypes_linux_mips64le.go | 28 +- .../x/sys/unix/ztypes_linux_mipsle.go | 30 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 32 +- .../x/sys/unix/ztypes_linux_ppc64.go | 28 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 28 +- .../x/sys/unix/ztypes_linux_riscv64.go | 28 +- .../x/sys/unix/ztypes_linux_s390x.go | 28 +- .../x/sys/unix/ztypes_linux_sparc64.go | 28 +- e2e/vendor/golang.org/x/term/term_windows.go | 4 +- e2e/vendor/golang.org/x/term/terminal.go | 9 +- .../x/tools/go/ast/inspector/typeof.go | 1 - .../rpc/errdetails/error_details.pb.go | 313 ++++++--- .../grpc/balancer/balancer.go | 8 +- .../endpointsharding/endpointsharding.go | 36 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 27 +- .../grpc/balancer/roundrobin/roundrobin.go | 7 - .../google.golang.org/grpc/dialoptions.go | 21 + .../health/grpc_health_v1/health_grpc.pb.go | 6 +- .../balancer/gracefulswitch/gracefulswitch.go | 10 +- .../grpc/internal/envconfig/envconfig.go | 4 - 
.../grpc/internal/transport/controlbuf.go | 68 +- .../grpc/internal/transport/http2_client.go | 20 +- .../grpc/internal/transport/http2_server.go | 44 +- .../grpc/internal/transport/http_util.go | 3 - .../grpc/internal/transport/transport.go | 3 + .../grpc/mem/buffer_slice.go | 11 + e2e/vendor/google.golang.org/grpc/server.go | 26 + e2e/vendor/google.golang.org/grpc/stream.go | 4 +- e2e/vendor/google.golang.org/grpc/version.go | 2 +- .../protobuf/encoding/protowire/wire.go | 26 +- .../editiondefaults/editions_defaults.binpb | Bin 146 -> 154 bytes .../protobuf/internal/filedesc/editions.go | 3 + .../protobuf/internal/filedesc/presence.go | 33 + .../protobuf/internal/genid/descriptor_gen.go | 90 ++- .../internal/impl/codec_message_opaque.go | 3 +- .../protobuf/internal/impl/message_opaque.go | 45 +- .../protobuf/internal/impl/presence.go | 3 - .../protobuf/internal/version/version.go | 2 +- .../reflect/protoreflect/source_gen.go | 8 + .../types/descriptorpb/descriptor.pb.go | 633 ++++++++++++------ .../component-helpers/resource/helpers.go | 7 +- .../kubernetes/test/utils/image/manifest.go | 29 +- e2e/vendor/modules.txt | 81 ++- 174 files changed, 2985 insertions(+), 2033 deletions(-) create mode 100644 e2e/vendor/github.com/prometheus/procfs/net_dev_snmp6.go rename e2e/vendor/go.opentelemetry.io/otel/{internal/attribute => attribute/internal}/attribute.go (97%) create mode 100644 e2e/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go delete mode 100644 e2e/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh delete mode 100644 e2e/vendor/go.opentelemetry.io/otel/internal/gen.go delete mode 100644 e2e/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go delete mode 100644 e2e/vendor/go.opentelemetry.io/otel/verify_readmes.sh create mode 100644 e2e/vendor/google.golang.org/protobuf/internal/filedesc/presence.go diff --git a/e2e/go.mod b/e2e/go.mod index f2fb1f2a280..7e1eb4359f9 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -1,8 +1,6 @@ module github.com/ceph/ceph-csi/e2e -go 1.24.0 - -toolchain go1.24.2 +go 1.24.4 // my own dependencies replace ( @@ -18,12 +16,12 @@ require ( github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.38.0 // when updating k8s.io modules, update the 'replace' section below too - k8s.io/api v0.33.3 - k8s.io/apimachinery v0.33.3 + k8s.io/api v0.33.4 + k8s.io/apimachinery v0.33.4 k8s.io/client-go v12.0.0+incompatible - k8s.io/cloud-provider v0.33.3 - k8s.io/kubernetes v1.33.3 - k8s.io/pod-security-admission v0.33.3 + k8s.io/cloud-provider v0.33.4 + k8s.io/kubernetes v1.33.4 + k8s.io/pod-security-admission v0.33.4 ) replace ( @@ -39,7 +37,7 @@ replace ( ) require ( - cel.dev/expr v0.23.0 // indirect + cel.dev/expr v0.24.0 // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect @@ -98,10 +96,10 @@ require ( github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opencontainers/selinux v1.11.1 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect 
github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -110,36 +108,36 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/sdk v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect - golang.org/x/crypto v0.40.0 // indirect + golang.org/x/crypto v0.41.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.42.0 // indirect - golang.org/x/oauth2 v0.29.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.34.0 // indirect - golang.org/x/term v0.33.0 // indirect - golang.org/x/text v0.27.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.34.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/grpc v1.73.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + golang.org/x/tools v0.35.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/grpc v1.74.2 // indirect + google.golang.org/protobuf v1.36.7 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.33.1 // indirect - k8s.io/apiserver v0.33.3 // indirect - k8s.io/component-base v0.33.3 // indirect - k8s.io/component-helpers v0.33.3 // indirect - k8s.io/controller-manager v0.33.3 // indirect + k8s.io/apiserver v0.33.4 // indirect + k8s.io/component-base v0.33.4 // indirect + k8s.io/component-helpers v0.33.4 // indirect + k8s.io/controller-manager v0.33.4 // indirect k8s.io/cri-api v0.33.2 // indirect k8s.io/cri-client v0.0.0 // indirect k8s.io/csi-translation-lib v0.33.1 // indirect @@ -149,7 +147,7 @@ require ( k8s.io/kube-scheduler v0.0.0 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.33.2 // indirect - k8s.io/mount-utils v0.33.1 // indirect + k8s.io/mount-utils v0.33.3 // indirect k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/e2e/go.sum b/e2e/go.sum index 445611bddfd..522b5c61ec5 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss= -cel.dev/expr v0.23.0/go.mod 
h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -157,14 +157,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -198,20 +198,20 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -225,8 +225,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -235,10 +235,10 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= -golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -248,34 +248,34 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -290,20 +290,20 @@ k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs= k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8= k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8= -k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= -k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y= +k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok= k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E= k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo= 
-k8s.io/cloud-provider v0.33.3 h1:HzpZh0W0MJoLkMGYrMjkHbwcGFIZxitgMf4utAwfPEY= -k8s.io/cloud-provider v0.33.3/go.mod h1:KcXaoYCJtTTiP+8IIEHcJDpvg0QzW67+FGWpgkOteDU= -k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= -k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= -k8s.io/component-helpers v0.33.3 h1:fjWVORSQfI0WKzPeIFSju/gMD9sybwXBJ7oPbqQu6eM= -k8s.io/component-helpers v0.33.3/go.mod h1:7iwv+Y9Guw6X4RrnNQOyQlXcvJrVjPveHVqUA5dm31c= -k8s.io/controller-manager v0.33.3 h1:OItg5te3ixRw9MFko5KW2ed4ogBbwnJfrS4mCXixbsg= -k8s.io/controller-manager v0.33.3/go.mod h1:sH/I5CXliIc+3bnEjdalgSTJ/3fJhIHrDA3sOwTNgxM= +k8s.io/cloud-provider v0.33.4 h1:et4DyeV0W8W+m2ByS34VVFMg8Aj0sz+UDVwanNkspTo= +k8s.io/cloud-provider v0.33.4/go.mod h1:cAC2s7mGpqVWwUars8TFgnvgXy+trDOF3+WSeKNsy/M= +k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= +k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= +k8s.io/component-helpers v0.33.4 h1:DYHQPxWB3XIk7hwAQ4YczUelJ37PcUHfnLeee0qFqV8= +k8s.io/component-helpers v0.33.4/go.mod h1:kRgidIgCKFqOW/wy7D8IL3YOT3iaIRZu6FcTEyRr7WU= +k8s.io/controller-manager v0.33.4 h1:HmlzmmNPu8H+cKEpAIRz0ptqpveKcj7KrCx9G+HXRAg= +k8s.io/controller-manager v0.33.4/go.mod h1:CpO8RarLcs7zh0sE4pqz88quF3xU3Dc4ZDfshnB8hw4= k8s.io/cri-api v0.33.2 h1:1OiWm6gUx7JrN+xqxMzGDCPfPxVT8b6n7B6SeYl5luM= k8s.io/cri-api v0.33.2/go.mod h1:OLQvT45OpIA+tv91ZrpuFIGY+Y2Ho23poS7n115Aocs= k8s.io/cri-client v0.33.2 h1:EostRehqo+XunR5soCzsa1uX55T0DaZ7H5q9wfMvIVE= @@ -322,12 +322,12 @@ k8s.io/kubectl v0.33.2 h1:7XKZ6DYCklu5MZQzJe+CkCjoGZwD1wWl7t/FxzhMz7Y= k8s.io/kubectl v0.33.2/go.mod h1:8rC67FB8tVTYraovAGNi/idWIK90z2CHFNMmGJZJ3KI= k8s.io/kubelet v0.33.2 h1:wxEau5/563oJb3j3KfrCKlNWWx35YlSgDLOYUBCQ0pg= k8s.io/kubelet v0.33.2/go.mod h1:way8VCDTUMiX1HTOvJv7M3xS/xNysJI6qh7TOqMe5KM= -k8s.io/kubernetes v1.33.3 h1:dBx5Z2ZhR8kNzAwCoCz4j1niUbUrNUDVxeSj4/Ienu0= -k8s.io/kubernetes v1.33.3/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00= -k8s.io/mount-utils v0.33.1 h1:hodPhfyoK+gG0SgnYwx1iPrlnpaESZiJ9GFzF5V/imE= -k8s.io/mount-utils v0.33.1/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= -k8s.io/pod-security-admission v0.33.3 h1:QjpEeaWV8hzCDq8YEtNmKOKlG6dARMK3zTZ98m3OYVI= -k8s.io/pod-security-admission v0.33.3/go.mod h1:GhVxV5tSx0HlclRcEd00Y5idzagPgqYo5GvWC8QEkX4= +k8s.io/kubernetes v1.33.4 h1:T1d5FLUYm3/KyUeV7YJhKTR980zHCHb7K2xhCSo3lE8= +k8s.io/kubernetes v1.33.4/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00= +k8s.io/mount-utils v0.33.3 h1:Q1jsnqdS4LdtJSYSXgiQv/XNrRHQncLk3gMYjKNSZrE= +k8s.io/mount-utils v0.33.3/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= +k8s.io/pod-security-admission v0.33.4 h1:adSwY7a/Q4Eoj+uCUfav90xRe6mB8waF0HAZ4gZeWD0= +k8s.io/pod-security-admission v0.33.4/go.mod h1:K+4JaqBz5yqE7TXf3g5zEiBLcu9RdW2BjTWydFEror4= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= diff --git a/e2e/vendor/cel.dev/expr/eval.pb.go b/e2e/vendor/cel.dev/expr/eval.pb.go index 8f651f9cc6a..a7aae0900c4 100644 --- a/e2e/vendor/cel.dev/expr/eval.pb.go +++ b/e2e/vendor/cel.dev/expr/eval.pb.go @@ -1,15 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.3 +// protoc v5.27.1 // source: cel/expr/eval.proto package expr import ( - status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -22,21 +22,18 @@ const ( ) type EvalState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState) Reset() { *x = EvalState{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState) String() string { @@ -47,7 +44,7 @@ func (*EvalState) ProtoMessage() {} func (x *EvalState) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -77,25 +74,22 @@ func (x *EvalState) GetResults() []*EvalState_Result { } type ExprValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Kind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Kind: // // *ExprValue_Value // *ExprValue_Error // *ExprValue_Unknown - Kind isExprValue_Kind `protobuf_oneof:"kind"` + Kind isExprValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExprValue) Reset() { *x = ExprValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExprValue) String() string { @@ -106,7 +100,7 @@ func (*ExprValue) ProtoMessage() {} func (x *ExprValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -121,30 +115,36 @@ func (*ExprValue) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} } -func (m *ExprValue) GetKind() isExprValue_Kind { - if m != nil { - return m.Kind +func (x *ExprValue) GetKind() isExprValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *ExprValue) GetValue() *Value { - if x, ok := x.GetKind().(*ExprValue_Value); ok { - return x.Value + if x != nil { + if x, ok := x.Kind.(*ExprValue_Value); ok { + return x.Value + } } return 
nil } func (x *ExprValue) GetError() *ErrorSet { - if x, ok := x.GetKind().(*ExprValue_Error); ok { - return x.Error + if x != nil { + if x, ok := x.Kind.(*ExprValue_Error); ok { + return x.Error + } } return nil } func (x *ExprValue) GetUnknown() *UnknownSet { - if x, ok := x.GetKind().(*ExprValue_Unknown); ok { - return x.Unknown + if x != nil { + if x, ok := x.Kind.(*ExprValue_Unknown); ok { + return x.Unknown + } } return nil } @@ -172,20 +172,17 @@ func (*ExprValue_Error) isExprValue_Kind() {} func (*ExprValue_Unknown) isExprValue_Kind() {} type ErrorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` unknownFields protoimpl.UnknownFields - - Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ErrorSet) Reset() { *x = ErrorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorSet) String() string { @@ -196,7 +193,7 @@ func (*ErrorSet) ProtoMessage() {} func (x *ErrorSet) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -211,28 +208,85 @@ func (*ErrorSet) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} } -func (x *ErrorSet) GetErrors() []*status.Status { +func (x *ErrorSet) GetErrors() []*Status { if x != nil { return x.Errors } return nil } -type UnknownSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Status struct { + state protoimpl.MessageState `protogen:"open.v1"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +func (x *Status) Reset() { + *x = Status{} + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UnknownSet) Reset() { - *x = UnknownSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[3] +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState `protogen:"open.v1"` + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UnknownSet) String() string { @@ -242,8 +296,8 @@ func (x *UnknownSet) String() string { func (*UnknownSet) ProtoMessage() {} func (x *UnknownSet) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -255,7 +309,7 @@ func (x *UnknownSet) ProtoReflect() protoreflect.Message { // Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. func (*UnknownSet) Descriptor() ([]byte, []int) { - return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} + return file_cel_expr_eval_proto_rawDescGZIP(), []int{4} } func (x *UnknownSet) GetExprs() []int64 { @@ -266,21 +320,18 @@ func (x *UnknownSet) GetExprs() []int64 { } type EvalState_Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState_Result) Reset() { *x = EvalState_Result{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState_Result) String() string { @@ -290,8 +341,8 @@ func (x *EvalState_Result) String() string { func (*EvalState_Result) ProtoMessage() {} func (x *EvalState_Result) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cel_expr_eval_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,39 +376,45 @@ var File_cel_expr_eval_proto protoreflect.FileDescriptor var file_cel_expr_eval_proto_rawDesc = []byte{ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, - 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, - 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, - 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, - 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, + 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, + 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 
0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28, + 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, + 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, + 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -372,28 +429,30 @@ func file_cel_expr_eval_proto_rawDescGZIP() []byte { return file_cel_expr_eval_proto_rawDescData } -var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_cel_expr_eval_proto_goTypes = []interface{}{ +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6) 
+var file_cel_expr_eval_proto_goTypes = []any{ (*EvalState)(nil), // 0: cel.expr.EvalState (*ExprValue)(nil), // 1: cel.expr.ExprValue (*ErrorSet)(nil), // 2: cel.expr.ErrorSet - (*UnknownSet)(nil), // 3: cel.expr.UnknownSet - (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result - (*Value)(nil), // 5: cel.expr.Value - (*status.Status)(nil), // 6: google.rpc.Status + (*Status)(nil), // 3: cel.expr.Status + (*UnknownSet)(nil), // 4: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result + (*Value)(nil), // 6: cel.expr.Value + (*anypb.Any)(nil), // 7: google.protobuf.Any } var file_cel_expr_eval_proto_depIdxs = []int32{ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue - 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result - 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet - 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet - 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status + 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_cel_expr_eval_proto_init() } @@ -402,69 +461,7 @@ func file_cel_expr_eval_proto_init() { return } file_cel_expr_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExprValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UnknownSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState_Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{ (*ExprValue_Value)(nil), 
(*ExprValue_Error)(nil), (*ExprValue_Unknown)(nil), @@ -475,7 +472,7 @@ func file_cel_expr_eval_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_cel_expr_eval_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index 8b016355adb..7bac0da33df 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { groups = append(groups, group) } return groups @@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) + _, err := fmt.Fprintf(buf, format, args...) return err } ws := func(s string) error { diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 592eec3e24f..76e59f12880 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { case pb.Counter != nil: pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] case pb.Histogram != nil: + h := pb.Histogram for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. - i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && + e.GetTimestamp() != nil { + h.Exemplars = append(h.Exemplars, e) + if len(h.Bucket) == 0 { + // Don't proceed to classic buckets if there are none. + continue + } + } + // h.Bucket are sorted by UpperBound. + i := sort.Search(len(h.Bucket), func(i int) bool { + return h.Bucket[i].GetUpperBound() >= e.GetValue() }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e + if i < len(h.Bucket) { + h.Bucket[i].Exemplar = e } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + CumulativeCount: proto.Uint64(h.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + h.Bucket = append(h.Bucket, b) } } default: @@ -227,6 +237,7 @@ type Exemplar struct { // Only last applicable exemplar is injected from the list. // For example for Counter it means last exemplar is injected. // For Histogram, it means last applicable exemplar for each bucket is injected. +// For a Native Histogram, all valid exemplars are injected. 
// // NewMetricWithExemplars works best with MustNewConstMetric and // MustNewConstHistogram, see example. diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go index 0a61b984613..b32c95fa3fa 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -25,9 +25,9 @@ import ( "golang.org/x/sys/unix" ) -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// errNotImplemented is returned by stub functions that replace cgo functions, when cgo // isn't available. -var notImplementedErr = errors.New("not implemented") +var errNotImplemented = errors.New("not implemented") type memoryInfo struct { vsize uint64 // Virtual memory size in bytes @@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if memInfo, err := getMemory(); err == nil { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { + } else if !errors.Is(err, errNotImplemented) { // Don't report an error when support is not compiled in. c.reportError(ch, c.rss, err) c.reportError(ch, c.vsize, err) diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go index 8ddb0995d6a..378865129b7 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -16,7 +16,7 @@ package prometheus func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr + return nil, errNotImplemented } // describe returns all descriptions of the collector for Darwin. 
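The darwin collector hunks above lean on Go's sentinel-error convention: the no-cgo stub returns a package-level error value (renamed errNotImplemented to satisfy the `err` prefix convention), and processCollect filters it with errors.Is so missing cgo support is skipped silently instead of being reported. A minimal self-contained sketch of that pattern, using illustrative names rather than the vendored identifiers:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel returned by stubs when a cgo-backed probe is not compiled in.
var errNotImplemented = errors.New("not implemented")

// getMemory stands in for the cgo-backed memory probe; a no-cgo build
// would replace it with this stub.
func getMemory() (uint64, error) {
	return 0, errNotImplemented
}

func main() {
	if rss, err := getMemory(); err == nil {
		fmt.Println("rss:", rss)
	} else if !errors.Is(err, errNotImplemented) {
		// Only genuine failures surface; the sentinel is silently skipped.
		fmt.Println("error:", err)
	}
}
```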
diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 9f4b130befa..8074f70f5d9 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if netstat, err := p.Netstat(); err == nil { var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets + if netstat.InOctets != nil { + inOctets = *netstat.InOctets } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets + if netstat.OutOctets != nil { + outOctets = *netstat.OutOctets } ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 356edb7868c..9332b0249a9 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { labels := prometheus.Labels{} - if !(code || method) { + if !code && !method { return labels } diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 2c808eece0a..487b466563b 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { return false } - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) + return m.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those @@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool { return false } - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) + return m.deleteByHashWithLabels(h, labels, m.curry) } // DeletePartialMatch deletes all metrics where the variable labels contain all of those @@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels, closer := constrainLabels(m.desc, labels) defer closer() - return m.metricMap.deleteByLabels(labels, m.curry) + return m.deleteByLabels(labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't @@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names @@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) 
hashLabelValues(vals []string) (uint64, error) { diff --git a/e2e/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/e2e/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 25da157f152..2ed1285068e 100644 --- a/e2e/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/e2e/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // metric names that are standardized across applications, as that would break // horizontal monitoring, for example the metrics provided by the Go collector // (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, +// fact, those metrics are already prefixed with "go_" or "process_", // respectively.) // // Conflicts between Collectors registered through the original Registerer with @@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { } } +// WrapCollectorWith returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapCollectorWith can be useful to work with multiple instances of a third +// party library that does not expose enough flexibility on the lifecycle of its +// registered metrics. +// For example, let's say you have a foo.New(reg Registerer) constructor that +// registers metrics but never unregisters them, and you want to create multiple +// instances of foo.Foo with different labels. +// The way to achieve that, is to create a new Registry, pass it to foo.New, +// then use WrapCollectorWith to wrap that Registry with the desired labels and +// register that as a collector in your main Registry. +// Then you can un-register the wrapped collector effectively un-registering the +// metrics registered by foo.New. +func WrapCollectorWith(labels Labels, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + labels: labels, + } +} + +// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided prefix to the name of all Metrics it collects. +// +// See the documentation of WrapCollectorWith for more details on the use case. +func WrapCollectorWithPrefix(prefix string, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + prefix: prefix, + } +} + type wrappingRegisterer struct { wrappedRegisterer Registerer prefix string diff --git a/e2e/vendor/github.com/prometheus/common/expfmt/text_parse.go b/e2e/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d27..4067978a178 100644 --- a/e2e/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/e2e/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn { } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. 
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. diff --git a/e2e/vendor/github.com/prometheus/common/model/alert.go b/e2e/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e14..460f554f294 100644 --- a/e2e/vendor/github.com/prometheus/common/model/alert.go +++ b/e2e/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/e2e/vendor/github.com/prometheus/common/model/labels.go b/e2e/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60b..de83afe93e9 100644 --- a/e2e/vendor/github.com/prometheus/common/model/labels.go +++ b/e2e/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool { return false } for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck return false } } diff --git a/e2e/vendor/github.com/prometheus/common/model/metric.go b/e2e/vendor/github.com/prometheus/common/model/metric.go index 5766107cf95..a6b01755bd4 100644 --- a/e2e/vendor/github.com/prometheus/common/model/metric.go +++ b/e2e/vendor/github.com/prometheus/common/model/metric.go @@ -27,13 +27,25 @@ import ( ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. 
This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,7 +62,7 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. LegacyValidation ValidationScheme = iota diff --git a/e2e/vendor/github.com/prometheus/common/model/time.go b/e2e/vendor/github.com/prometheus/common/model/time.go index 5727452c1ee..fed9e87b915 100644 --- a/e2e/vendor/github.com/prometheus/common/model/time.go +++ b/e2e/vendor/github.com/prometheus/common/model/time.go @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. +// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. 
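The model/time.go hunk above adds negative-duration support in two coordinated places: ParseDurationAllowNegative strips a leading '-', reuses ParseDuration, and negates the result, while Duration.String re-emits the sign. A short usage sketch against the vendored github.com/prometheus/common/model package (illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration still rejects a leading '-'.
	if _, err := model.ParseDuration("-1h30m"); err != nil {
		fmt.Println("ParseDuration:", err)
	}

	// ParseDurationAllowNegative parses the magnitude and negates it.
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // prints "-1h30m"; String() now carries the sign
}
```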
diff --git a/e2e/vendor/github.com/prometheus/procfs/.golangci.yml b/e2e/vendor/github.com/prometheus/procfs/.golangci.yml
index 126df9e67ac..3c3bf910fdf 100644
--- a/e2e/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/e2e/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,22 +1,45 @@
----
+version: "2"
 linters:
   enable:
-    - errcheck
-    - godot
-    - gosimple
-    - govet
-    - ineffassign
-    - misspell
-    - revive
-    - staticcheck
-    - testifylint
-    - unused
-
-linter-settings:
-  godot:
-    capital: true
-    exclude:
-      # Ignore "See: URL"
-      - 'See:'
-  misspell:
-    locale: US
+    - forbidigo
+    - godot
+    - misspell
+    - revive
+    - testifylint
+  settings:
+    forbidigo:
+      forbid:
+        - pattern: ^fmt\.Print.*$
+          msg: Do not commit print statements.
+    godot:
+      exclude:
+        # Ignore "See: URL".
+        - 'See:'
+      capital: true
+    misspell:
+      locale: US
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/prometheus/procfs
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/e2e/vendor/github.com/prometheus/procfs/Makefile.common b/e2e/vendor/github.com/prometheus/procfs/Makefile.common
index 16172923506..0ed55c2ba21 100644
--- a/e2e/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/e2e/vendor/github.com/prometheus/procfs/Makefile.common
@@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
 GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)

 GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')

 PROMU := $(FIRST_GOPATH)/bin/promu
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v2.0.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -275,3 +275,9 @@ $(1)_precheck:
 		exit 1; \
 	fi
 endef
+
+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/e2e/vendor/github.com/prometheus/procfs/README.md b/e2e/vendor/github.com/prometheus/procfs/README.md
index 1224816c2ad..0718239cf19 100644
--- a/e2e/vendor/github.com/prometheus/procfs/README.md
+++ b/e2e/vendor/github.com/prometheus/procfs/README.md
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
 The procfs library includes a set of test fixtures which include many example files from
 the `/proc` and `/sys` filesystems.  These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
 which is extracted automatically during testing.  To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`. diff --git a/e2e/vendor/github.com/prometheus/procfs/arp.go b/e2e/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7ccc4..2e53344151f 100644 --- a/e2e/vendor/github.com/prometheus/procfs/arp.go +++ b/e2e/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/e2e/vendor/github.com/prometheus/procfs/fs.go b/e2e/vendor/github.com/prometheus/procfs/fs.go index 4980c875bfc..9bdaccc7c8a 100644 --- a/e2e/vendor/github.com/prometheus/procfs/fs.go +++ b/e2e/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/e2e/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/e2e/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69ac..1b5bdbdf84a 100644 --- a/e2e/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/e2e/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/e2e/vendor/github.com/prometheus/procfs/fscache.go b/e2e/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa03c..7db86330779 100644 --- a/e2e/vendor/github.com/prometheus/procfs/fscache.go +++ b/e2e/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/e2e/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/e2e/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610ef..3a43e83915f 100644 --- a/e2e/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/e2e/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/e2e/vendor/github.com/prometheus/procfs/internal/util/parse.go b/e2e/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc7885..5a7d2df06ae 100644 --- a/e2e/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/e2e/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/e2e/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/e2e/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875ceec6..d5404a6d728 100644 --- a/e2e/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/e2e/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/e2e/vendor/github.com/prometheus/procfs/mountstats.go b/e2e/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c810f..50caa73274e 100644 --- a/e2e/vendor/github.com/prometheus/procfs/mountstats.go +++ b/e2e/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) 
} diff --git a/e2e/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/e2e/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 00000000000..f50b38e3528 --- /dev/null +++ b/e2e/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/e2e/vendor/github.com/prometheus/procfs/net_ip_socket.go b/e2e/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a4a..19e3378f72d 100644 --- a/e2e/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/e2e/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. 
The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets. Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. netIPSocketLine struct { diff --git a/e2e/vendor/github.com/prometheus/procfs/net_protocols.go b/e2e/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709fa..8d4b1ac05b0 100644 --- a/e2e/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/e2e/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/e2e/vendor/github.com/prometheus/procfs/net_tcp.go b/e2e/vendor/github.com/prometheus/procfs/net_tcp.go index 52776295572..0396d72015c 100644 --- a/e2e/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/e2e/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. 
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/e2e/vendor/github.com/prometheus/procfs/net_unix.go b/e2e/vendor/github.com/prometheus/procfs/net_unix.go index d868cebdaae..d7e0cacb4c6 100644 --- a/e2e/vendor/github.com/prometheus/procfs/net_unix.go +++ b/e2e/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/e2e/vendor/github.com/prometheus/procfs/proc.go b/e2e/vendor/github.com/prometheus/procfs/proc.go index 142796368fe..368187fa884 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_cgroup.go b/e2e/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f571a..4a64347c03a 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs). 
By prefixing this path with the mount point of diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_io.go b/e2e/vendor/github.com/prometheus/procfs/proc_io.go index 776f3497173..d15b66ddb64 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_io.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_netstat.go b/e2e/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d794b..4248c1716ee 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = 
&value + procNetstat.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value + procNetstat.TCPRetransFail = &value 
case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + procNetstat.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - 
procNetstat.TcpExt.TCPACKSkippedTimeWait = &value + procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_smaps.go b/e2e/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e82080..9a297afcf89 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_snmp.go b/e2e/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642a7..4bdc90b07ea 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + procSnmp.InAddrMaskReps = 
&value case "OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_snmp6.go b/e2e/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a136..fb7fd3995be 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = &value + procSnmp6.InUnknownProtos = 
&value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + procSnmp6.InGroupMembResponses = &value case "InGroupMembReductions": - 
procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_status.go 
b/e2e/vendor/github.com/prometheus/procfs/proc_status.go index a055197c63e..dd8aa56885e 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_status.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/e2e/vendor/github.com/prometheus/procfs/proc_sys.go b/e2e/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef8b..3810d1ac999 100644 --- a/e2e/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/e2e/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/e2e/vendor/github.com/prometheus/procfs/softirqs.go b/e2e/vendor/github.com/prometheus/procfs/softirqs.go index 28708e07459..403e6ae7086 100644 --- a/e2e/vendor/github.com/prometheus/procfs/softirqs.go +++ b/e2e/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, 
err) } } - case parts[0] == "IRQ_POLL:": + case "IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml b/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml index c58e48ab0c4..888e5da8028 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -1,13 +1,9 @@ -# See https://github.com/golangci/golangci-lint#config-file +version: "2" run: - issues-exit-code: 1 #Default - tests: true #Default - + issues-exit-code: 1 + tests: true linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. + default: none enable: - asasalint - bodyclose @@ -15,10 +11,7 @@ linters: - errcheck - errorlint - godot - - gofumpt - - goimports - gosec - - gosimple - govet - ineffassign - misspell @@ -26,227 +19,230 @@ linters: - revive - staticcheck - testifylint - - typecheck - unconvert - - unused - unparam + - unused - usestdlibvars - usetesting - + settings: + depguard: + rules: + auto/sdk: + files: + - '!internal/global/trace.go' + - ~internal/global/trace_test.go + deny: + - pkg: go.opentelemetry.io/auto/sdk + desc: Do not use SDK from automatic instrumentation. + non-tests: + files: + - '!$test' + - '!**/*test/*.go' + - '!**/internal/matchers/*.go' + deny: + - pkg: testing + - pkg: github.com/stretchr/testify + - pkg: crypto/md5 + - pkg: crypto/sha1 + - pkg: crypto/**/pkix + otel-internal: + files: + - '**/sdk/*.go' + - '**/sdk/**/*.go' + - '**/exporters/*.go' + - '**/exporters/**/*.go' + - '**/schema/*.go' + - '**/schema/**/*.go' + - '**/metric/*.go' + - '**/metric/**/*.go' + - '**/bridge/*.go' + - '**/bridge/**/*.go' + - '**/trace/*.go' + - '**/trace/**/*.go' + - '**/log/*.go' + - '**/log/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/internal$ + desc: Do not use cross-module internal packages. + - pkg: go.opentelemetry.io/otel/internal/internaltest + desc: Do not use cross-module internal packages. + - pkg: go.opentelemetry.io/otel/internal/matchers + desc: Do not use cross-module internal packages. 
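[editor's note] Looking back at the `proc_status.go` hunk above: `calcNSPidsList` now splits on tabs (the separator `/proc/[pid]/status` actually uses for the `NSpid` field) and reports parse errors instead of silently skipping zero values. The patched function, extracted as a standalone sketch:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// calcNSPidsList parses the NSpid value from /proc/[pid]/status, which
// lists the process's PID in each nested namespace, tab-separated.
func calcNSPidsList(nspidsString string) ([]uint64, error) {
	var nspids []uint64
	for _, field := range strings.Split(nspidsString, "\t") {
		nspid, err := strconv.ParseUint(field, 10, 64)
		if err != nil {
			// Surface the error instead of dropping the entry.
			return nil, err
		}
		nspids = append(nspids, nspid)
	}
	return nspids, nil
}

func main() {
	nspids, err := calcNSPidsList("2914\t1")
	fmt.Println(nspids, err) // [2914 1] <nil>
}
```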
+ otlp-internal: + files: + - '!**/exporters/otlp/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/internal + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - '!**/exporters/otlp/otlpmetric/internal/*.go' + - '!**/exporters/otlp/otlpmetric/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - '!**/exporters/otlp/otlptrace/*.go' + - '!**/exporters/otlp/otlptrace/internal/**.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - ^[ ]*[-•] + # Exclude sentences prefixing a list. + - :$ + misspell: + locale: US + ignore-rules: + - cancelled + perfsprint: + int-conversion: true + err-error: true + errorf: true + sprintf1: true + strconcat: true + revive: + confidence: 0.01 + rules: + - name: blank-imports + - name: bool-literal-in-expr + - name: constant-logical-expr + - name: context-as-argument + arguments: + - allowTypesBefore: '*testing.T' + disabled: true + - name: context-keys-type + - name: deep-exit + - name: defer + arguments: + - - call-chain + - loop + - name: dot-imports + - name: duplicated-imports + - name: early-return + arguments: + - preserveScope + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + arguments: + - sayRepetitiveInsteadOfStutters + - name: flag-parameter + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + arguments: + - preserveScope + - name: package-comments + - name: range + - name: range-val-in-closure + - name: range-val-address + - name: redefines-builtin-id + - name: string-format + arguments: + - - panic + - /^[^\n]*$/ + - must not contain line breaks + - name: struct-tag + - name: superfluous-else + arguments: + - preserveScope + - name: time-equal + - name: unconditional-recursion + - name: unexported-return + - name: unhandled-error + arguments: + - fmt.Fprint + - fmt.Fprintf + - fmt.Fprintln + - fmt.Print + - fmt.Printf + - fmt.Println + - name: unnecessary-stmt + - name: useless-break + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + - name: waitgroup-by-value + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error + exclusions: + generated: lax + presets: + - common-false-positives + - legacy + - std-error-handling + rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - linters: + - revive + path: .*internal/.* + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Yes, they are, but it's okay in a test. + - linters: + - revive + path: _test\.go + text: exported func.*returns unexported type.*which can be annoying to use + # Example test functions should be treated like main. + - linters: + - revive + path: example.*_test\.go + text: calls to (.+) only in main[(][)] or init[(][)] functions + # It's okay to not run gosec and perfsprint in a test. 
+ - linters: + - gosec + - perfsprint + path: _test\.go + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - linters: + - gosec + text: 'G404:' + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - linters: + - gosec + text: 'G402: TLS MinVersion too low.' + paths: + - third_party$ + - builtin$ + - examples$ issues: - # Maximum issues count per one linter. - # Set to 0 to disable. - # Default: 50 - # Setting to unlimited so the linter only is run once to debug all issues. max-issues-per-linter: 0 - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 - # Setting to unlimited so the linter only is run once to debug all issues. max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source. - exclude-rules: - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. - - path: '.*internal/.*' - text: "exported (method|function|type|const) (.+) should have comment or be unexported" - linters: - - revive - # Yes, they are, but it's okay in a test. - - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - # Example test functions should be treated like main. - - path: example.*_test\.go - text: "calls to (.+) only in main[(][)] or init[(][)] functions" - linters: - - revive - # It's okay to not run gosec and perfsprint in a test. - - path: _test\.go - linters: - - gosec - - perfsprint - # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) - # as we commonly use it in tests and examples. - - text: "G404:" - linters: - - gosec - # Ignoring gosec G402: TLS MinVersion too low - # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - - text: "G402: TLS MinVersion too low." - linters: - - gosec - include: - # revive exported should have comment or be unexported. - - EXC0012 - # revive package comment should be of the form ... - - EXC0013 - -linters-settings: - depguard: - rules: - non-tests: - files: - - "!$test" - - "!**/*test/*.go" - - "!**/internal/matchers/*.go" - deny: - - pkg: "testing" - - pkg: "github.com/stretchr/testify" - - pkg: "crypto/md5" - - pkg: "crypto/sha1" - - pkg: "crypto/**/pkix" - auto/sdk: - files: - - "!internal/global/trace.go" - - "~internal/global/trace_test.go" - deny: - - pkg: "go.opentelemetry.io/auto/sdk" - desc: Do not use SDK from automatic instrumentation. - otlp-internal: - files: - - "!**/exporters/otlp/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" - desc: Do not use cross-module internal packages. - otlptrace-internal: - files: - - "!**/exporters/otlp/otlptrace/*.go" - - "!**/exporters/otlp/otlptrace/internal/**.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" - desc: Do not use cross-module internal packages. - otlpmetric-internal: - files: - - "!**/exporters/otlp/otlpmetric/internal/*.go" - - "!**/exporters/otlp/otlpmetric/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" - desc: Do not use cross-module internal packages. 
- otel-internal: - files: - - "**/sdk/*.go" - - "**/sdk/**/*.go" - - "**/exporters/*.go" - - "**/exporters/**/*.go" - - "**/schema/*.go" - - "**/schema/**/*.go" - - "**/metric/*.go" - - "**/metric/**/*.go" - - "**/bridge/*.go" - - "**/bridge/**/*.go" - - "**/trace/*.go" - - "**/trace/**/*.go" - - "**/log/*.go" - - "**/log/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/internal$" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/attribute" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/internaltest" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/matchers" - desc: Do not use cross-module internal packages. - godot: - exclude: - # Exclude links. - - '^ *\[[^]]+\]:' - # Exclude sentence fragments for lists. - - '^[ ]*[-•]' - # Exclude sentences prefixing a list. - - ':$' - goimports: - local-prefixes: go.opentelemetry.io - misspell: - locale: US - ignore-words: - - cancelled - perfsprint: - err-error: true - errorf: true - int-conversion: true - sprintf1: true - strconcat: true - revive: - # Sets the default failure confidence. - # This means that linting errors with less than 0.8 confidence will be ignored. - # Default: 0.8 - confidence: 0.01 - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - rules: - - name: blank-imports - - name: bool-literal-in-expr - - name: constant-logical-expr - - name: context-as-argument - disabled: true - arguments: - - allowTypesBefore: "*testing.T" - - name: context-keys-type - - name: deep-exit - - name: defer - arguments: - - ["call-chain", "loop"] - - name: dot-imports - - name: duplicated-imports - - name: early-return - arguments: - - "preserveScope" - - name: empty-block - - name: empty-lines - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - arguments: - - "sayRepetitiveInsteadOfStutters" - - name: flag-parameter - - name: identical-branches - - name: if-return - - name: import-shadowing - - name: increment-decrement - - name: indent-error-flow - arguments: - - "preserveScope" - - name: package-comments - - name: range - - name: range-val-in-closure - - name: range-val-address - - name: redefines-builtin-id - - name: string-format - arguments: - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - - name: struct-tag - - name: superfluous-else - arguments: - - "preserveScope" - - name: time-equal - - name: unconditional-recursion - - name: unexported-return - - name: unhandled-error - arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Fprintln" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - - name: unnecessary-stmt - - name: useless-break - - name: var-declaration - - name: var-naming - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - - name: waitgroup-by-value - testifylint: - enable-all: true - disable: - - float-compare - - go-require - - require-error +formatters: + enable: + - gofumpt + - goimports + - golines + settings: + goimports: + local-prefixes: + - go.opentelemetry.io + golines: + max-len: 120 + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c076db2823f..648e4abab8c 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,57 @@ This project adheres to 
[Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.36.0/0.58.0/0.12.0] 2025-05-20 + +### Added + +- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421) +- The `go.opentelemetry.io/otel/semconv/v1.31.0` package. + The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479) +- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688) +- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973) +- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973) +- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973) +- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662) +- The `go.opentelemetry.io/otel/semconv/v1.32.0` package. + The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`(#6782) +- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794) +- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796) + +### Removed + +- Drop support for [Go 1.22]. (#6381, #6418) +- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494) +- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492) +- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662) + +### Changed + +- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433) +- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455) +- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465) +- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466) +- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. 
(#6507) +- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641) + +### Deprecated + +- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449) + +### Fixes + +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392) +- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456) +- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472) + ## [1.35.0/0.57.0/0.11.0] 2025-03-05 This release is the last to support [Go 1.22]. @@ -3237,7 +3288,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD +[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 [1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 diff --git a/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7b8af585aab..1902dac057a 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -643,6 +643,7 @@ should be canceled. 
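[editor's note] Among the changelog entries above, v1.36.0 adds a `WithHTTPClient` option to the OTLP HTTP exporters. A hedged usage sketch for the trace exporter, assuming the option takes a `*http.Client` as the changelog entry describes:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	// A caller-supplied client, e.g. with a request timeout; WithHTTPClient
	// (per changelog #6751) hands it to the exporter in place of the
	// internally constructed default client.
	client := &http.Client{Timeout: 10 * time.Second}

	exporter, err := otlptracehttp.New(context.Background(),
		otlptracehttp.WithHTTPClient(client),
	)
	if err != nil {
		log.Fatalf("creating OTLP trace exporter: %v", err)
	}
	defer func() { _ = exporter.Shutdown(context.Background()) }()
}
```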
### Triagers +- [Alex Kats](https://github.com/akats7), Capital One - [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent ### Approvers diff --git a/e2e/vendor/go.opentelemetry.io/otel/Makefile b/e2e/vendor/go.opentelemetry.io/otel/Makefile index 226410d7428..62a56f4d341 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/Makefile +++ b/e2e/vendor/go.opentelemetry.io/otel/Makefile @@ -43,8 +43,11 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +VERIFYREADMES = $(TOOLS)/verifyreadmes +$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes + GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint MISSPELL = $(TOOLS)/misspell $(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell @@ -68,7 +71,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -213,11 +216,8 @@ go-mod-tidy/%: crosslink && cd $(DIR) \ && $(GO) mod tidy -compat=1.21 -.PHONY: lint-modules -lint-modules: go-mod-tidy - .PHONY: lint -lint: misspell lint-modules golangci-lint govulncheck +lint: misspell go-mod-tidy golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: $(PORTO) @@ -319,10 +319,11 @@ add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} +MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) .PHONY: lint-markdown lint-markdown: - docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md .PHONY: verify-readmes -verify-readmes: - ./verify_readmes.sh +verify-readmes: $(VERIFYREADMES) + $(VERIFYREADMES) diff --git a/e2e/vendor/go.opentelemetry.io/otel/README.md b/e2e/vendor/go.opentelemetry.io/otel/README.md index 8421cd7e597..b6007881212 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/README.md +++ b/e2e/vendor/go.opentelemetry.io/otel/README.md @@ -6,6 +6,7 @@ [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) [![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) +[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) 
implementation of [OpenTelemetry](https://opentelemetry.io/). @@ -53,25 +54,18 @@ Currently, this project supports the following environments. |----------|------------|--------------| | Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | -| Ubuntu | 1.22 | amd64 | | Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | -| Ubuntu | 1.22 | 386 | | Ubuntu | 1.24 | arm64 | | Ubuntu | 1.23 | arm64 | -| Ubuntu | 1.22 | arm64 | | macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | -| macOS 13 | 1.22 | amd64 | | macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | -| macOS | 1.22 | arm64 | | Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | -| Windows | 1.22 | amd64 | | Windows | 1.24 | 386 | | Windows | 1.23 | 386 | -| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md b/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md index 1e13ae54f71..7c1a9119dcc 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -1,5 +1,9 @@ # Release Process +## Create a `Version Release` issue + +Create a `Version Release` issue to track the release process. + ## Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. @@ -123,6 +127,16 @@ Importantly, bump any package versions referenced to be the latest one you just [Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ [content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go +### Close the milestone + +Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone. +This helps track what changes were included in each release. + +- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr) +- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged). + +Once all related issues and PRs have been added to the milestone, close the milestone. + ### Demo Repository Bump the dependencies in the following Go services: @@ -130,3 +144,7 @@ Bump the dependencies in the following Go services: - [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) - [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) - [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) + +### Close the `Version Release` issue + +Once the todo list in the `Version Release` issue is complete, close the issue. 
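[editor's note] The `attribute/filter.go` hunk that follows sizes its maps with `make(map[Key]struct{}, len(keys))`. The capacity hint avoids incremental map growth while inserting a known number of entries; a small sketch of the same pattern with a hypothetical key type:

```go
package main

import "fmt"

// newAllowSet builds a membership set, preallocating for len(keys) entries.
func newAllowSet(keys []string) map[string]struct{} {
	allowed := make(map[string]struct{}, len(keys)) // capacity hint
	for _, k := range keys {
		allowed[k] = struct{}{}
	}
	return allowed
}

func main() {
	set := newAllowSet([]string{"http.method", "http.route"})
	_, ok := set["http.method"]
	fmt.Println(ok) // true
}
```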
diff --git a/e2e/vendor/go.opentelemetry.io/otel/attribute/filter.go b/e2e/vendor/go.opentelemetry.io/otel/attribute/filter.go index be9cd922d87..3eeaa5d4426 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/e2e/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -19,7 +19,7 @@ func NewAllowKeysFilter(keys ...Key) Filter { return func(kv KeyValue) bool { return false } } - allowed := make(map[Key]struct{}) + allowed := make(map[Key]struct{}, len(keys)) for _, k := range keys { allowed[k] = struct{}{} } @@ -38,7 +38,7 @@ func NewDenyKeysFilter(keys ...Key) Filter { return func(kv KeyValue) bool { return true } } - forbid := make(map[Key]struct{}) + forbid := make(map[Key]struct{}, len(keys)) for _, k := range keys { forbid[k] = struct{}{} } diff --git a/e2e/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/e2e/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go similarity index 97% rename from e2e/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go rename to e2e/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 691d96c7554..b76d2bbfdbd 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/e2e/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -5,7 +5,7 @@ Package attribute provide several helper functions for some commonly used logic of processing attributes. */ -package attribute // import "go.opentelemetry.io/otel/internal/attribute" +package attribute // import "go.opentelemetry.io/otel/attribute/internal" import ( "reflect" diff --git a/e2e/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/e2e/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go new file mode 100644 index 00000000000..5791c6e7aaa --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "math" +) + +func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func rawToBool(r uint64) bool { + return r != 0 +} + +func int64ToRaw(i int64) uint64 { + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec +} + +func rawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). 
+ return int64(r) // nolint: gosec +} + +func float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func rawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} diff --git a/e2e/vendor/go.opentelemetry.io/otel/attribute/value.go b/e2e/vendor/go.opentelemetry.io/otel/attribute/value.go index 9ea0ecbbd27..817eecacf11 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/e2e/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -9,8 +9,7 @@ import ( "reflect" "strconv" - "go.opentelemetry.io/otel/internal" - "go.opentelemetry.io/otel/internal/attribute" + attribute "go.opentelemetry.io/otel/attribute/internal" ) //go:generate stringer -type=Type @@ -51,7 +50,7 @@ const ( func BoolValue(v bool) Value { return Value{ vtype: BOOL, - numeric: internal.BoolToRaw(v), + numeric: boolToRaw(v), } } @@ -82,7 +81,7 @@ func IntSliceValue(v []int) Value { func Int64Value(v int64) Value { return Value{ vtype: INT64, - numeric: internal.Int64ToRaw(v), + numeric: int64ToRaw(v), } } @@ -95,7 +94,7 @@ func Int64SliceValue(v []int64) Value { func Float64Value(v float64) Value { return Value{ vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), + numeric: float64ToRaw(v), } } @@ -125,7 +124,7 @@ func (v Value) Type() Type { // AsBool returns the bool value. Make sure that the Value's type is // BOOL. func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) + return rawToBool(v.numeric) } // AsBoolSlice returns the []bool value. Make sure that the Value's type is @@ -144,7 +143,7 @@ func (v Value) asBoolSlice() []bool { // AsInt64 returns the int64 value. Make sure that the Value's type is // INT64. func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) + return rawToInt64(v.numeric) } // AsInt64Slice returns the []int64 value. Make sure that the Value's type is @@ -163,7 +162,7 @@ func (v Value) asInt64Slice() []int64 { // AsFloat64 returns the float64 value. Make sure that the Value's // type is FLOAT64. func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) + return rawToFloat64(v.numeric) } // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is diff --git a/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index e4c4a753c88..51fb76b30d0 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,3 +1,4 @@ # This is a renovate-friendly source of Docker images. -FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python -FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver +FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python +FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver +FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/e2e/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/e2e/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh deleted file mode 100644 index 93e80ea306c..00000000000 --- a/e2e/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -top_dir='.' 
-if [[ $# -gt 0 ]]; then - top_dir="${1}" -fi - -p=$(pwd) -mod_dirs=() - -# Note `mapfile` does not exist in older bash versions: -# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash - -while IFS= read -r line; do - mod_dirs+=("$line") -done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) - -for mod_dir in "${mod_dirs[@]}"; do - cd "${mod_dir}" - - while IFS= read -r line; do - echo ".${line#${p}}" - done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') - cd "${p}" -done diff --git a/e2e/vendor/go.opentelemetry.io/otel/internal/gen.go b/e2e/vendor/go.opentelemetry.io/otel/internal/gen.go deleted file mode 100644 index 4259f0320d4..00000000000 --- a/e2e/vendor/go.opentelemetry.io/otel/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/e2e/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/e2e/vendor/go.opentelemetry.io/otel/internal/global/handler.go index c657ff8e755..2e47b2964c8 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/e2e/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package global provides the OpenTelemetry global API. 
package global // import "go.opentelemetry.io/otel/internal/global" import ( diff --git a/e2e/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/e2e/vendor/go.opentelemetry.io/otel/internal/global/meter.go index a6acd8dca66..adb37b5b0e7 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/e2e/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return i, nil } -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *meter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *meter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return i, nil } -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *meter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return i, nil } -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *meter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return i, nil } -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *meter) Float64UpDownCounter( + name string, + options ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return i, nil } -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *meter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -399,7 +417,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *meter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options 
...metric.Float64O return i, nil } -func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *meter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return i, nil } -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *meter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/e2e/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/e2e/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 8982aa0dc56..49e4ac4faab 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/e2e/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -158,7 +158,18 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // a nonRecordingSpan by default. var autoInstEnabled = new(bool) -func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { +// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF +// uprobe to this code. +// +// "noinline" pragma prevents the method from ever being inlined. +// +//go:noinline +func (t *tracer) newSpan( + ctx context.Context, + autoSpan *bool, + name string, + opts []trace.SpanStartOption, +) (context.Context, trace.Span) { // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is // so the auto-instrumentation can define a uprobe for (*t).newSpan and be // provided with the address of the bool autoInstEnabled points to. It diff --git a/e2e/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/e2e/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index b2fe3e41d3b..00000000000 --- a/e2e/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - // Assumes original was a valid int64 (overflow not checked). - return uint64(i) // nolint: gosec -} - -func RawToInt64(r uint64) int64 { - // Assumes original was a valid int64 (overflow not checked). - return int64(r) // nolint: gosec -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - // Assumes original was a valid *float64 (overflow not checked). - return (*float64)(unsafe.Pointer(r)) // nolint: gosec -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - // Assumes original was a valid *int64 (overflow not checked). 
- return (*int64)(unsafe.Pointer(r)) // nolint: gosec -} diff --git a/e2e/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/e2e/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index f8435d8f288..b7fc973a66c 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/e2e/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -106,7 +106,9 @@ type Float64ObservableUpDownCounterConfig struct { // NewFloat64ObservableUpDownCounterConfig returns a new // [Float64ObservableUpDownCounterConfig] with all opts applied. -func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { +func NewFloat64ObservableUpDownCounterConfig( + opts ...Float64ObservableUpDownCounterOption, +) Float64ObservableUpDownCounterConfig { var config Float64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyFloat64ObservableUpDownCounter(config) @@ -239,12 +241,16 @@ type float64CallbackOpt struct { cback Float64Callback } -func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableCounter( + cfg Float64ObservableCounterConfig, +) Float64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter( + cfg Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/e2e/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/e2e/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index e079aaef169..4404b71a22f 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/e2e/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct { // NewInt64ObservableUpDownCounterConfig returns a new // [Int64ObservableUpDownCounterConfig] with all opts applied. 
-func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { +func NewInt64ObservableUpDownCounterConfig( + opts ...Int64ObservableUpDownCounterOption, +) Int64ObservableUpDownCounterConfig { var config Int64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyInt64ObservableUpDownCounter(config) @@ -242,7 +244,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter return cfg } -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter( + cfg Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/e2e/vendor/go.opentelemetry.io/otel/metric/instrument.go b/e2e/vendor/go.opentelemetry.io/otel/metric/instrument.go index a535782e1d9..9f48d5f117c 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/e2e/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o descOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o descOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o unitOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.unit = string(o) return c } @@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o unitOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.unit = string(o) return c } diff --git a/e2e/vendor/go.opentelemetry.io/otel/metric/meter.go b/e2e/vendor/go.opentelemetry.io/otel/metric/meter.go index 14e08c24a4b..fdd2a7011c3 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/e2e/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -110,7 +110,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. 
- Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + Int64ObservableUpDownCounter( + name string, + options ...Int64ObservableUpDownCounterOption, + ) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -194,7 +197,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. - Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + Float64ObservableUpDownCounter( + name string, + options ...Float64ObservableUpDownCounterOption, + ) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used diff --git a/e2e/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/e2e/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index ca6fcbdc099..9afb69e583b 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/e2e/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -86,13 +86,19 @@ func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (Meter) Int64ObservableCounter( + string, + ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { return Int64ObservableCounter{}, nil } // Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (Meter) Int64ObservableUpDownCounter( + string, + ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { return Int64ObservableUpDownCounter{}, nil } @@ -128,19 +134,28 @@ func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64G // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (Meter) Float64ObservableCounter( + string, + ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { return Float64ObservableCounter{}, nil } // Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (Meter) Float64ObservableUpDownCounter( + string, + ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { return Float64ObservableUpDownCounter{}, nil } // Float64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. 
-func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (Meter) Float64ObservableGauge( + string, + ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { return Float64ObservableGauge{}, nil } diff --git a/e2e/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/e2e/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 552263ba734..ebda5026d6b 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/e2e/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -28,7 +28,21 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { } // Extract returns a copy of parent with the baggage from the carrier added. +// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked +// for multiple values extraction. Otherwise, Get is called. func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + if multiCarrier, ok := carrier.(ValuesGetter); ok { + return extractMultiBaggage(parent, multiCarrier) + } + return extractSingleBaggage(parent, carrier) +} + +// Fields returns the keys who's values are set with Inject. +func (b Baggage) Fields() []string { + return []string{baggageHeader} +} + +func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context { bStr := carrier.Get(baggageHeader) if bStr == "" { return parent @@ -41,7 +55,23 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context return baggage.ContextWithBaggage(parent, bag) } -// Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { - return []string{baggageHeader} +func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context { + bVals := carrier.Values(baggageHeader) + if len(bVals) == 0 { + return parent + } + var members []baggage.Member + for _, bStr := range bVals { + currBag, err := baggage.Parse(bStr) + if err != nil { + continue + } + members = append(members, currBag.Members()...) + } + + b, err := baggage.New(members...) + if err != nil || b.Len() == 0 { + return parent + } + return baggage.ContextWithBaggage(parent, b) } diff --git a/e2e/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/e2e/vendor/go.opentelemetry.io/otel/propagation/propagation.go index 8c8286aab4d..5c8c26ea2eb 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/e2e/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -9,6 +9,7 @@ import ( ) // TextMapCarrier is the storage medium used by a TextMapPropagator. +// See ValuesGetter for how a TextMapCarrier can get multiple values for a key. type TextMapCarrier interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -29,6 +30,18 @@ type TextMapCarrier interface { // must never be done outside of a new major release. } +// ValuesGetter can return multiple values for a single key, +// with contrast to TextMapCarrier.Get which returns a single value. +type ValuesGetter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Values returns all values associated with the passed key. + Values(key string) []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+} + // MapCarrier is a TextMapCarrier that uses a map held in memory as a storage // medium for propagated key-value pairs. type MapCarrier map[string]string @@ -55,14 +68,25 @@ func (c MapCarrier) Keys() []string { return keys } -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces. type HeaderCarrier http.Header -// Get returns the value associated with the passed key. +// Compile time check that HeaderCarrier implements ValuesGetter. +var _ TextMapCarrier = HeaderCarrier{} + +// Compile time check that HeaderCarrier implements TextMapCarrier. +var _ ValuesGetter = HeaderCarrier{} + +// Get returns the first value associated with the passed key. func (hc HeaderCarrier) Get(key string) string { return http.Header(hc).Get(key) } +// Values returns all values associated with the passed key. +func (hc HeaderCarrier) Values(key string) []string { + return http.Header(hc).Values(key) +} + // Set stores the key-value pair. func (hc HeaderCarrier) Set(key string, value string) { http.Header(hc).Set(key, value) @@ -89,6 +113,8 @@ type TextMapPropagator interface { // must never be done outside of a new major release. // Extract reads cross-cutting concerns from the carrier into a Context. + // Implementations may check if the carrier implements ValuesGetter, + // to support extraction of multiple values per key. Extract(ctx context.Context, carrier TextMapCarrier) context.Context // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. diff --git a/e2e/vendor/go.opentelemetry.io/otel/renovate.json b/e2e/vendor/go.opentelemetry.io/otel/renovate.json index a6fa353f95c..fa5acf2d3bd 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/renovate.json +++ b/e2e/vendor/go.opentelemetry.io/otel/renovate.json @@ -1,7 +1,8 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:best-practices" + "config:best-practices", + "helpers:pinGitHubActionDigestsToSemver" ], "ignorePaths": [], "labels": ["Skip Changelog", "dependencies"], @@ -25,6 +26,10 @@ { "matchPackageNames": ["golang.org/x/**"], "groupName": "golang.org/x" + }, + { + "matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"], + "enabled": false } ] } diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 07923ed8d94..e3309231d42 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package env provides types and functionality for environment variable support +// in the OpenTelemetry SDK. 
package env // import "go.opentelemetry.io/otel/sdk/internal/env" import ( diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index 925bcf99305..c8d3fb7e3cf 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -5,10 +5,8 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - crand "crypto/rand" "encoding/binary" - "math/rand" - "sync" + "math/rand/v2" "go.opentelemetry.io/otel/trace" ) @@ -29,20 +27,15 @@ type IDGenerator interface { // must never be done outside of a new major release. } -type randomIDGenerator struct { - sync.Mutex - randSource *rand.Rand -} +type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { - gen.Lock() - defer gen.Unlock() sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -53,18 +46,17 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { - gen.Lock() - defer gen.Unlock() tid := trace.TraceID{} sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(tid[:]) + binary.NativeEndian.PutUint64(tid[:8], rand.Uint64()) + binary.NativeEndian.PutUint64(tid[8:], rand.Uint64()) if tid.IsValid() { break } } for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -73,9 +65,5 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. } func defaultIDGenerator() IDGenerator { - gen := &randomIDGenerator{} - var rngSeed int64 - _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) - gen.randSource = rand.New(rand.NewSource(rngSeed)) - return gen + return &randomIDGenerator{} } diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 185aa7c08f7..0e2a2e7c60d 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -169,7 +169,17 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) + global.Info( + "Tracer created", + "name", + name, + "version", + is.Version, + "schemaURL", + is.SchemaURL, + "attributes", + is.Attributes, + ) } return t } diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 43419d3b541..0b65ae9ab70 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -26,7 +26,11 @@ var _ trace.Tracer = &tracer{} // The Span is created with the provided name and as a child of any existing // span context found in the passed context. 
The created Span will be // configured appropriately by any SpanOption passed. -func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { +func (tr *tracer) Start( + ctx context.Context, + name string, + options ...trace.SpanStartOption, +) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) if ctx == nil { @@ -112,7 +116,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo } // newRecordingSpan returns a new configured recordingSpan. -func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { +func (tr *tracer) newRecordingSpan( + psc, sc trace.SpanContext, + name string, + sr SamplingResult, + config *trace.SpanConfig, +) *recordingSpan { startTime := config.Timestamp() if startTime.IsZero() { startTime = time.Now() diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go index 2b797fbdea2..1af257449ac 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -1,9 +1,10 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package sdk provides the OpenTelemetry default SDK for Go. package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.35.0" + return "1.36.0" } diff --git a/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go b/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go index 7e2910025a9..d90af8f673c 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -57,14 +57,15 @@ type autoTracer struct { var _ Tracer = autoTracer{} func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { - var psc SpanContext + var psc, sc SpanContext sampled := true span := new(autoSpan) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = ContextWithSpan(ctx, span) diff --git a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go index 3c5e1cdb1b3..e7ca62c660c 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go +++ b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -251,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. 
+ // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -266,27 +273,31 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied -// text description and key-value pairs. +// SpanEvent is a time-stamped annotation of the span, consisting of +// user-supplied text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. Time time.Time `json:"timeUnixNano,omitempty"` @@ -369,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. 
The ID is a // 16-byte array. diff --git a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go index 1d013a8fa80..1039bf40cda 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go +++ b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" +// StatusCode is the status of a Span. +// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) @@ -30,7 +32,7 @@ func (s StatusCode) String() string { return "" } -// The Status type defines a logical error model that is suitable for different +// Status defines a logical error model that is suitable for different // programming environments, including REST APIs and RPC APIs. type Status struct { // A developer-facing human readable error message. diff --git a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go index b0394070814..e5f10767ca3 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go +++ b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. 
// Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go index 7251492da06..ae9ce102a9a 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ b/e2e/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -316,7 +316,7 @@ func (v Value) String() string { case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: diff --git a/e2e/vendor/go.opentelemetry.io/otel/trace/noop.go b/e2e/vendor/go.opentelemetry.io/otel/trace/noop.go index c8b1ae5d67e..0f56e4dbb34 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/e2e/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -95,6 +95,8 @@ var autoInstEnabled = new(bool) // tracerProvider return a noopTracerProvider if autoEnabled is false, // otherwise it will return a TracerProvider from the sdk package used in // auto-instrumentation. +// +//go:noinline func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { if *autoEnabled { return newAutoTracerProvider() diff --git a/e2e/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/e2e/vendor/go.opentelemetry.io/otel/verify_readmes.sh deleted file mode 100644 index 1e87855eeaa..00000000000 --- a/e2e/vendor/go.opentelemetry.io/otel/verify_readmes.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) - -missingReadme=false -for dir in $dirs; do - if [ ! -f "$dir/README.md" ]; then - echo "couldn't find README.md for $dir" - missingReadme=true - fi -done - -if [ "$missingReadme" = true ] ; then - echo "Error: some READMEs couldn't be found." - exit 1 -fi diff --git a/e2e/vendor/go.opentelemetry.io/otel/version.go b/e2e/vendor/go.opentelemetry.io/otel/version.go index d5fa71f6745..ac3c0b15da2 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/version.go +++ b/e2e/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.35.0" + return "1.36.0" } diff --git a/e2e/vendor/go.opentelemetry.io/otel/versions.yaml b/e2e/vendor/go.opentelemetry.io/otel/versions.yaml index 2b4cb4b4187..79f82f3d058 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/e2e/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.35.0 + version: v1.36.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -23,11 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.57.0 + version: v0.58.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.11.0 + version: v0.12.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -40,4 +40,6 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/log/logtest + - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/e2e/vendor/golang.org/x/net/http2/http2.go b/e2e/vendor/golang.org/x/net/http2/http2.go index 6c18ea230be..ea5ae629fde 100644 --- a/e2e/vendor/golang.org/x/net/http2/http2.go +++ b/e2e/vendor/golang.org/x/net/http2/http2.go @@ -11,8 +11,6 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. package http2 // import "golang.org/x/net/http2" import ( diff --git a/e2e/vendor/golang.org/x/oauth2/internal/doc.go b/e2e/vendor/golang.org/x/oauth2/internal/doc.go index 03265e888af..8c7c475f2db 100644 --- a/e2e/vendor/golang.org/x/oauth2/internal/doc.go +++ b/e2e/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. package internal diff --git a/e2e/vendor/golang.org/x/oauth2/internal/oauth2.go b/e2e/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989beaf49..71ea6ad1f5e 100644 --- a/e2e/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/e2e/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/e2e/vendor/golang.org/x/oauth2/internal/token.go b/e2e/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef0fc..8389f246294 100644 --- a/e2e/vendor/golang.org/x/oauth2/internal/token.go +++ b/e2e/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. 
+// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. 
-func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. @@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/e2e/vendor/golang.org/x/oauth2/internal/transport.go b/e2e/vendor/golang.org/x/oauth2/internal/transport.go index b9db01ddfdf..afc0aeb274e 100644 --- a/e2e/vendor/golang.org/x/oauth2/internal/transport.go +++ b/e2e/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/e2e/vendor/golang.org/x/oauth2/oauth2.go b/e2e/vendor/golang.org/x/oauth2/oauth2.go index eacdd7fd933..de34feb8442 100644 --- a/e2e/vendor/golang.org/x/oauth2/oauth2.go +++ b/e2e/vendor/golang.org/x/oauth2/oauth2.go @@ -22,9 +22,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). +// your own [context.Context]. // -// Deprecated: Use context.Background() or context.TODO() instead. +// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. 
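One note on the `ExpiresIn` field added to the internal Token in the token.go diff above: its comment leaves it to the application to turn the relative `expires_in` seconds into an absolute deadline. A rough sketch of that conversion, using a stand-in struct since the internal type is not importable (`token` and `resolveExpiry` are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"time"
)

// token mirrors just the two fields relevant here.
type token struct {
	Expiry    time.Time
	ExpiresIn int64 // seconds relative to "approximately now", per the wire format
}

// resolveExpiry populates Expiry from ExpiresIn when the server only
// sent the relative form. Capture now close to the HTTP response.
func resolveExpiry(t *token, now time.Time) {
	if t.Expiry.IsZero() && t.ExpiresIn > 0 {
		t.Expiry = now.Add(time.Duration(t.ExpiresIn) * time.Second)
	}
}

func main() {
	t := token{ExpiresIn: 3600}
	resolveExpiry(&t, time.Now())
	fmt.Println(t.Expiry)
}
```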
@@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +46,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -135,7 +135,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. +// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. 
@@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
 	return NewClient(ctx, c.TokenSource(ctx, t))
 }
 
-// TokenSource returns a TokenSource that returns t until t expires,
+// TokenSource returns a [TokenSource] that returns t until t expires,
 // automatically refreshing it as necessary using the provided context.
 //
-// Most users will use Config.Client instead.
+// Most users will use [Config.Client] instead.
 func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
 	tkr := &tokenRefresher{
 		ctx: ctx,
@@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
 	}
 }
 
-// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// tokenRefresher is a TokenSource that makes "grant_type=refresh_token"
 // HTTP requests to renew a token using a RefreshToken.
 type tokenRefresher struct {
 	ctx context.Context // used to get HTTP requests
@@ -305,8 +305,7 @@ type reuseTokenSource struct {
 }
 
 // Token returns the current token if it's still valid, else will
-// refresh the current token (using r.Context for HTTP client
-// information) and return the new one.
+// refresh the current token and return the new one.
 func (s *reuseTokenSource) Token() (*Token, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
 	return t, nil
 }
 
-// StaticTokenSource returns a TokenSource that always returns the same token.
+// StaticTokenSource returns a [TokenSource] that always returns the same token.
 // Because the provided token t is never refreshed, StaticTokenSource is only
 // useful for tokens that never expire.
 func StaticTokenSource(t *Token) TokenSource {
@@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) {
 	return s.t, nil
 }
 
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
+// HTTPClient is the context key to use with [context.WithValue]
+// to associate a [*http.Client] value with a context.
 var HTTPClient internal.ContextKey
 
-// NewClient creates an *http.Client from a Context and TokenSource.
+// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource].
 // The returned client is not valid beyond the lifetime of the context.
 //
-// Note that if a custom *http.Client is provided via the Context it
+// Note that if a custom [*http.Client] is provided via the [context.Context] it
 // is used only for token acquisition and is not used to configure the
-// *http.Client returned from NewClient.
+// [*http.Client] returned from NewClient.
 //
 // As a special case, if src is nil, a non-OAuth2 client is returned
 // using the provided context. This exists to support related OAuth2
@@ -368,7 +367,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
 	}
 }
 
-// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// ReuseTokenSource returns a [TokenSource] which repeatedly returns the
 // same token as long as it's valid, starting with t.
 // When its cached token is invalid, a new token is obtained from src.
 //
@@ -376,10 +375,10 @@
 // (such as a file on disk) between runs of a program, rather than
 // obtaining new tokens unnecessarily.
 //
-// The initial token t may be nil, in which case the TokenSource is
+// The initial token t may be nil, in which case the [TokenSource] is
 // wrapped in a caching version if it isn't one already. This also
 // means it's always safe to wrap ReuseTokenSource around any other
-// TokenSource without adverse effects.
+// [TokenSource] without adverse effects.
 func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
 	// Don't wrap a reuseTokenSource in itself. That would work,
 	// but cause an unnecessary number of mutex operations.
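The TokenSource/NewClient machinery above is what callers normally touch. A short sketch reusing conf, ctx, and tok from the earlier example:

    // Refreshing client: tok is refreshed on demand and the result is
    // cached by the reuseTokenSource wrapper until it expires.
    client := oauth2.NewClient(ctx, conf.TokenSource(ctx, tok))

    // Fixed-credential client, e.g. a personal access token that never
    // expires; StaticTokenSource never attempts a refresh.
    static := oauth2.NewClient(ctx, oauth2.StaticTokenSource(
    	&oauth2.Token{AccessToken: "YOUR_ACCESS_TOKEN"}, // placeholder
    ))
    _, _ = client, static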
@@ -397,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
 	}
 }
 
-// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
-// TokenSource returned by ReuseTokenSource, except the expiry buffer is
+// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the
+// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is
 // configurable. The expiration time of a token is calculated as
 // t.Expiry.Add(-earlyExpiry).
 func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
diff --git a/e2e/vendor/golang.org/x/oauth2/pkce.go b/e2e/vendor/golang.org/x/oauth2/pkce.go
index 6a95da975ce..cea8374d51b 100644
--- a/e2e/vendor/golang.org/x/oauth2/pkce.go
+++ b/e2e/vendor/golang.org/x/oauth2/pkce.go
@@ -1,6 +1,7 @@
 // Copyright 2023 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
+
 package oauth2
 
 import (
@@ -20,9 +21,9 @@ const (
 // This follows recommendations in RFC 7636.
 //
 // A fresh verifier should be generated for each authorization.
-// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
-// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
-// (or Config.DeviceAccessToken).
+// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
+// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken]
+// with [VerifierOption].
 func GenerateVerifier() string {
 	// "RECOMMENDED that the output of a suitable random number generator be
 	// used to create a 32-octet sequence. The octet sequence is then
@@ -36,22 +37,22 @@ func GenerateVerifier() string {
 	return base64.RawURLEncoding.EncodeToString(data)
 }
 
-// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
-// passed to Config.Exchange or Config.DeviceAccessToken only.
+// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be
+// passed to [Config.Exchange] or [Config.DeviceAccessToken].
 func VerifierOption(verifier string) AuthCodeOption {
 	return setParam{k: codeVerifierKey, v: verifier}
 }
 
 // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256.
 //
-// Prefer to use S256ChallengeOption where possible.
+// Prefer to use [S256ChallengeOption] where possible.
 func S256ChallengeFromVerifier(verifier string) string {
 	sha := sha256.Sum256([]byte(verifier))
 	return base64.RawURLEncoding.EncodeToString(sha[:])
 }
 
 // S256ChallengeOption derives a PKCE code challenge derived from verifier with
-// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
+// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
 // only.
 func S256ChallengeOption(verifier string) AuthCodeOption {
 	return challengeOption{
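The pkce.go documentation above pins down where each PKCE option belongs: the challenge goes on the authorization URL, the verifier on the code exchange. A minimal sketch, again reusing conf, ctx, and code from the earlier examples:

    // One fresh verifier per authorization attempt.
    verifier := oauth2.GenerateVerifier()

    // Challenge travels with the authorization request...
    url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))

    // ...and the raw verifier with the token exchange, letting the
    // server confirm both requests came from the same client.
    tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))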
diff --git a/e2e/vendor/golang.org/x/oauth2/token.go b/e2e/vendor/golang.org/x/oauth2/token.go
index 8c31136c402..239ec329620 100644
--- a/e2e/vendor/golang.org/x/oauth2/token.go
+++ b/e2e/vendor/golang.org/x/oauth2/token.go
@@ -44,7 +44,7 @@ type Token struct {
 	// Expiry is the optional expiration time of the access token.
 	//
-	// If zero, TokenSource implementations will reuse the same
+	// If zero, [TokenSource] implementations will reuse the same
 	// token forever and RefreshToken or equivalent
 	// mechanisms for that TokenSource will not be used.
 	Expiry time.Time `json:"expiry,omitempty"`
@@ -58,7 +58,7 @@ type Token struct {
 	// raw optionally contains extra metadata from the server
 	// when updating a token.
-	raw interface{}
+	raw any
 
 	// expiryDelta is used to calculate when a token is considered
 	// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
@@ -86,16 +86,16 @@ func (t *Token) Type() string {
 // SetAuthHeader sets the Authorization header to r using the access
 // token in t.
 //
-// This method is unnecessary when using Transport or an HTTP Client
+// This method is unnecessary when using [Transport] or an HTTP Client
 // returned by this package.
 func (t *Token) SetAuthHeader(r *http.Request) {
 	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
 }
 
-// WithExtra returns a new Token that's a clone of t, but using the
+// WithExtra returns a new [Token] that's a clone of t, but using the
 // provided raw extra map. This is only intended for use by packages
 // implementing derivative OAuth2 flows.
-func (t *Token) WithExtra(extra interface{}) *Token {
+func (t *Token) WithExtra(extra any) *Token {
 	t2 := new(Token)
 	*t2 = *t
 	t2.raw = extra
@@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token {
 // Extra returns an extra field.
 // Extra fields are key-value pairs returned by the server as a
 // part of the token retrieval response.
-func (t *Token) Extra(key string) interface{} {
-	if raw, ok := t.raw.(map[string]interface{}); ok {
+func (t *Token) Extra(key string) any {
+	if raw, ok := t.raw.(map[string]any); ok {
 		return raw[key]
 	}
 
@@ -163,6 +163,7 @@ func tokenFromInternal(t *internal.Token) *Token {
 		TokenType:    t.TokenType,
 		RefreshToken: t.RefreshToken,
 		Expiry:       t.Expiry,
+		ExpiresIn:    t.ExpiresIn,
 		raw:          t.Raw,
 	}
 }
diff --git a/e2e/vendor/golang.org/x/oauth2/transport.go b/e2e/vendor/golang.org/x/oauth2/transport.go
index 90657915fbc..8bbebbac9ee 100644
--- a/e2e/vendor/golang.org/x/oauth2/transport.go
+++ b/e2e/vendor/golang.org/x/oauth2/transport.go
@@ -11,12 +11,12 @@ import (
 	"sync"
 )
 
-// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
-// wrapping a base RoundTripper and adding an Authorization header
-// with a token from the supplied Sources.
+// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests,
+// wrapping a base [http.RoundTripper] and adding an Authorization header
+// with a token from the supplied [TokenSource].
 //
 // Transport is a low-level mechanism. Most code will use the
-// higher-level Config.Client method instead.
+// higher-level [Config.Client] method instead.
 type Transport struct {
 	// Source supplies the token to add to outgoing requests'
 	// Authorization headers.
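The token.go hunks above cover the two Token escape hatches: Extra for non-standard response fields and SetAuthHeader for requests made outside Transport or Config.Client. A short sketch reusing ctx and tok; the "id_token" field and API URL are illustrative, not guaranteed by any particular provider:

    // Extra surfaces raw response fields; an OIDC server's id_token is
    // the common case. The assertion fails cleanly if the field is absent.
    if idToken, ok := tok.Extra("id_token").(string); ok {
    	_ = idToken // verify via an OIDC library, not shown here
    }

    // SetAuthHeader is only needed when bypassing the package's clients.
    req, err := http.NewRequestWithContext(ctx, http.MethodGet,
    	"https://api.provider.example.com/v1/me", nil) // placeholder URL
    if err == nil {
    	tok.SetAuthHeader(req)
    }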
@@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. @@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/e2e/vendor/golang.org/x/sys/unix/mkerrors.sh b/e2e/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c312..d1c8b2640eb 100644 --- a/e2e/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/e2e/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -349,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' diff --git a/e2e/vendor/golang.org/x/sys/unix/syscall_darwin.go b/e2e/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad3bf..7838ca5db20 100644 --- a/e2e/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/e2e/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b 
== 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9e7a6c5a4fc..b6db27d937c 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -328,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -492,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -528,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -555,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -844,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2025-01-17)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x31 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -937,9 +942,6 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 ETHTOOL_FAMILY_NAME = "ethtool" @@ -1213,6 +1215,7 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 @@ -1231,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1255,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1274,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1582,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 
0x34 @@ -1633,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1695,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1817,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2493,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2652,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2732,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2982,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -3336,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xf + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3406,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3530,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3574,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3688,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a8c421e29b5..1c37f9fbc45 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git 
a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9a88d18130f..6f54d34aefc 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 7cb6a867efd..783ec5c126f 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d0ecd2c583b..ca83d3ba162 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 7a2940ae0a3..607e611c0cb 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index d14ca8f2ecf..b9cb5bd3c09 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 2da1bac1e3d..65b078a6382 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 
@@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 28727514b5f..5298a3033d0 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7f287b54b5d..7bc557c8761 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 7e5f9e6aa8d..152399bb04a 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 37c87952fcb..1a1ce2409cf 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5220133613a..4231a1fb578 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 4bfe2b5b6e6..21c0e952665 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ 
b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index e3cffb869a3..f00d1cd7cf4 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c219c8db396..bc8d539e6af 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306a..aca56ee4947 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e9..2ea1ef58c3e 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e50297445..d22c8af3196 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51bb..5ee264ae974 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18e..f9f03ebf5fa 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ 
b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b0..87c2118e849 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278f..391ad102fb6 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e67..5656157757a 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc220..0482b52e3c3 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb14..71806f08f38 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b4463650256..e35a7105829 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c1881..2aea476705e 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 84053916987..6c9bb4e5607 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git 
a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6c..680bc9915a3 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d4..620f271052f 100644 --- a/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/e2e/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8bcac2835f6..cd236443f64 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -115,7 +115,9 @@ type Statx_t struct { Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 Dio_read_offset_align uint32 - _ [9]uint64 + Atomic_write_unit_max_opt uint32 + _ [1]uint32 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -2317,6 +2320,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2597,8 +2605,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -4044,7 +4052,7 @@ const ( ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 ETHTOOL_A_TSINFO_STATS = 0x6 ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 - ETHTOOL_A_TSINFO_MAX = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4130,6 +4138,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4780,7 +4801,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x150 + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 @@ -5414,7 +5435,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5530,7 +5551,7 @@ const ( NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 
NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 62db85f6cb7..485f2d3a1bc 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,19 +282,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -330,17 +324,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -348,10 +336,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 7d89d648d9a..ecbd1ad8bc5 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -300,16 +300,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -344,27 +338,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + 
Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 9c0b39eec76..02f0463a44b 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,19 +273,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -321,17 +315,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -339,10 +327,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index de9c7ff36cf..6f4d400d241 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -279,16 +279,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -323,27 +317,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + 
Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2336bd2bf09..cd532cfa558 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -280,16 +280,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -324,27 +318,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 4711f0be16d..41336208517 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + 
Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ab99a34b996..eaa37eb718e 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 04c9866e3cf..98ae6a1e4ac 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + 
Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 60aa69f618c..cae1961594d 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index cb4fad785d1..6ce3b4e0283 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,19 +285,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,17 +327,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -351,10 +339,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 
Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 60272cfce86..c7429c6a146 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 3f5b91bc0d5..4bf4baf4cac 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min 
uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 51550f15a63..e9709d70afb 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -307,16 +307,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -351,27 +345,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 3239e50e0e2..fb44268ca7d 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -302,16 +302,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -346,27 +340,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + 
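// Hedged note on the regenerated layout: in every GOARCH variant touched
// here, the *_delay_max/*_delay_min members move out of the middle of
// Taskstats and are re-appended after Irq_delay_total, presumably to match
// the order in which the kernel's taskstats UAPI actually added them.
// Consumers that read these counters by field name are unaffected; only
// code that assumed fixed byte offsets into the struct would notice.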
Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index faf20027831..9c38265c74a 100644 --- a/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/e2e/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -284,16 +284,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -328,27 +322,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/e2e/vendor/golang.org/x/term/term_windows.go b/e2e/vendor/golang.org/x/term/term_windows.go index df6bf948e14..0ddd81c02a6 100644 --- a/e2e/vendor/golang.org/x/term/term_windows.go +++ b/e2e/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. 
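// A minimal usage sketch for the raw-mode path above, using only the
// exported golang.org/x/term API (MakeRaw, Restore, NewTerminal, ReadLine);
// the lowercase makeRaw here is the Windows implementation behind
// term.MakeRaw. With ENABLE_PROCESSED_OUTPUT left set, "\n" written to the
// console still expands correctly, and the CR/LF handling added to
// terminal.go below means a pasted CRLF ends one line instead of two.
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	if !term.IsTerminal(fd) {
		return
	}
	oldState, err := term.MakeRaw(fd) // raw input; processed output retained
	if err != nil {
		panic(err)
	}
	defer term.Restore(fd, oldState)

	screen := struct {
		io.Reader
		io.Writer
	}{os.Stdin, os.Stdout}
	t := term.NewTerminal(screen, "> ")
	line, err := t.ReadLine()
	if err == nil {
		fmt.Printf("read: %q\r\n", line)
	}
}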
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/e2e/vendor/golang.org/x/term/terminal.go b/e2e/vendor/golang.org/x/term/terminal.go index 13e9a64ad10..bddb2e2aebd 100644 --- a/e2e/vendor/golang.org/x/term/terminal.go +++ b/e2e/vendor/golang.org/x/term/terminal.go @@ -146,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) { // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. + if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { diff --git a/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index e936c67c985..be0f990a295 100644 --- a/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -217,7 +217,6 @@ func typeOf(n ast.Node) uint64 { return 0 } -//go:linkname maskOf golang.org/x/tools/go/ast/inspector.maskOf func maskOf(nodes []ast.Node) uint64 { if len(nodes) == 0 { return math.MaxUint64 // match all node types diff --git a/e2e/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/e2e/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index bed9216d8d8..e017ef07142 100644 --- a/e2e/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/e2e/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -703,6 +703,65 @@ type QuotaFailure_Violation struct { // For example: "Service disabled" or "Daily Limit for read operations // exceeded". Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The API Service from which the `QuotaFailure.Violation` orginates. In + // some cases, Quota issues originate from an API Service other than the one + // that was called. In other words, a dependency of the called API Service + // could be the cause of the `QuotaFailure`, and this field would have the + // dependency API service name. 
+ // + // For example, if the called API is Kubernetes Engine API + // (container.googleapis.com), and a quota violation occurs in the + // Kubernetes Engine API itself, this field would be + // "container.googleapis.com". On the other hand, if the quota violation + // occurs when the Kubernetes Engine API creates VMs in the Compute Engine + // API (compute.googleapis.com), this field would be + // "compute.googleapis.com". + ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"` + // The metric of the violated quota. A quota metric is a named counter to + // measure usage, such as API requests or CPUs. When an activity occurs in a + // service, such as Virtual Machine allocation, one or more quota metrics + // may be affected. + // + // For example, "compute.googleapis.com/cpus_per_vm_family", + // "storage.googleapis.com/internet_egress_bandwidth". + QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"` + // The id of the violated quota. Also know as "limit name", this is the + // unique identifier of a quota in the context of an API service. + // + // For example, "CPUS-PER-VM-FAMILY-per-project-region". + QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"` + // The dimensions of the violated quota. Every non-global quota is enforced + // on a set of dimensions. While quota metric defines what to count, the + // dimensions specify for what aspects the counter should be increased. + // + // For example, the quota "CPUs per region per VM family" enforces a limit + // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions + // "region" and "vm_family". And if the violation occurred in region + // "us-central1" and for VM family "n1", the quota_dimensions would be, + // + // { + // "region": "us-central1", + // "vm_family": "n1", + // } + // + // When a quota is enforced globally, the quota_dimensions would always be + // empty. + QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The enforced quota value at the time of the `QuotaFailure`. + // + // For example, if the enforced quota value at the time of the + // `QuotaFailure` on the number of CPUs is "10", then the value of this + // field would reflect this quantity. + QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"` + // The new quota value being rolled out at the time of the violation. At the + // completion of the rollout, this value will be enforced in place of + // quota_value. If no rollout is in progress at the time of the violation, + // this field is not set. + // + // For example, if at the time of the violation a rollout is in progress + // changing the number of CPUs quota from 10 to 20, 20 would be the value of + // this field. 
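// A hedged sketch of how a caller might surface the new Violation fields
// when an RPC fails with RESOURCE_EXHAUSTED; it relies only on the
// generated getters below plus google.golang.org/grpc/status.
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

func explainQuotaFailure(err error) {
	st := status.Convert(err)
	for _, d := range st.Details() {
		qf, ok := d.(*errdetails.QuotaFailure)
		if !ok {
			continue
		}
		for _, v := range qf.GetViolations() {
			fmt.Printf("quota %s (%s) on %s: limit=%d dimensions=%v\n",
				v.GetQuotaId(), v.GetQuotaMetric(), v.GetApiService(),
				v.GetQuotaValue(), v.GetQuotaDimensions())
			// future_quota_value is optional; the getter returns 0 when unset.
			if fv := v.GetFutureQuotaValue(); fv != 0 {
				fmt.Printf("  rollout in progress: new limit will be %d\n", fv)
			}
		}
	}
}

func main() {
	explainQuotaFailure(nil) // nil error carries no details; illustrative only
}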
+ FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"` } func (x *QuotaFailure_Violation) Reset() { @@ -751,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string { return "" } +func (x *QuotaFailure_Violation) GetApiService() string { + if x != nil { + return x.ApiService + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaMetric() string { + if x != nil { + return x.QuotaMetric + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaId() string { + if x != nil { + return x.QuotaId + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string { + if x != nil { + return x.QuotaDimensions + } + return nil +} + +func (x *QuotaFailure_Violation) GetQuotaValue() int64 { + if x != nil { + return x.QuotaValue + } + return 0 +} + +func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 { + if x != nil && x.FutureQuotaValue != nil { + return *x.FutureQuotaValue + } + return 0 +} + // A message type used to describe a single precondition failure. type PreconditionFailure_Violation struct { state protoimpl.MessageState @@ -775,7 +876,7 @@ type PreconditionFailure_Violation struct { func (x *PreconditionFailure_Violation) Reset() { *x = PreconditionFailure_Violation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -788,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string { func (*PreconditionFailure_Violation) ProtoMessage() {} func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -886,7 +987,7 @@ type BadRequest_FieldViolation struct { func (x *BadRequest_FieldViolation) Reset() { *x = BadRequest_FieldViolation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -899,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string { func (*BadRequest_FieldViolation) ProtoMessage() {} func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -958,7 +1059,7 @@ type Help_Link struct { func (x *Help_Link) Reset() { *x = Help_Link{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -971,7 +1072,7 @@ func (x *Help_Link) String() string { func (*Help_Link) ProtoMessage() {} func (x *Help_Link) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ 
-1029,79 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, - 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, - 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, - 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, - 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, - 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 
0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, - 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x71, + 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19, + 0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f, + 0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, + 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75, + 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, + 0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, + 0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, + 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 
0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, + 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, + 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, + 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 
0x77, 0x6e, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, + 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1116,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte { return file_google_rpc_error_details_proto_rawDescData } -var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo (*RetryInfo)(nil), // 1: google.rpc.RetryInfo @@ -1130,24 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage nil, // 10: google.rpc.ErrorInfo.MetadataEntry (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation - (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation - (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation - (*Help_Link)(nil), // 14: google.rpc.Help.Link - (*durationpb.Duration)(nil), // 15: google.protobuf.Duration + nil, // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + (*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 14: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 15: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 16: google.protobuf.Duration } var 
file_google_rpc_error_details_proto_depIdxs = []int32{ 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry - 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation - 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation - 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 13, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + 9, // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } @@ -1288,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PreconditionFailure_Violation); i { case 0: return &v.state @@ -1300,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BadRequest_FieldViolation); i { case 0: return &v.state @@ -1312,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Help_Link); i { case 0: return &v.state @@ -1325,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() { } } } + file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_rpc_error_details_proto_rawDesc, NumEnums: 0, - NumMessages: 15, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff --git a/e2e/vendor/google.golang.org/grpc/balancer/balancer.go b/e2e/vendor/google.golang.org/grpc/balancer/balancer.go index 
c9b343c7156..b1264017db1 100644 --- a/e2e/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/e2e/vendor/google.golang.org/grpc/balancer/balancer.go @@ -360,6 +360,10 @@ type Balancer interface { // call SubConn.Shutdown for its existing SubConns; however, this will be // required in a future release, so it is recommended. Close() + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() } // ExitIdler is an optional interface for balancers to implement. If @@ -367,8 +371,8 @@ type Balancer interface { // the ClientConn is idle. If unimplemented, ClientConn.Connect will cause // all SubConns to connect. // -// Notice: it will be required for all balancers to implement this in a future -// release. +// Deprecated: All balancers must implement this interface. This interface will +// be removed in a future release. type ExitIdler interface { // ExitIdle instructs the LB policy to reconnect to backends / exit the // IDLE state, if appropriate and possible. Note that SubConns that enter diff --git a/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index cc606f4dae4..0ad6bb1f220 100644 --- a/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -45,7 +45,15 @@ type ChildState struct { // Balancer exposes only the ExitIdler interface of the child LB policy. // Other methods of the child policy are called only by endpointsharding. - Balancer balancer.ExitIdler + Balancer ExitIdler +} + +// ExitIdler provides access to only the ExitIdle method of the child balancer. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() } // Options are the options to configure the behaviour of the @@ -205,6 +213,16 @@ func (es *endpointSharding) Close() { } } +func (es *endpointSharding) ExitIdle() { + es.childMu.Lock() + defer es.childMu.Unlock() + for _, bw := range es.children.Load().Values() { + if !bw.isClosed { + bw.child.ExitIdle() + } + } +} + // updateState updates this component's state. It sends the aggregated state, // and a picker with round robin behavior with all the child states present if // needed. @@ -326,15 +344,13 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) { // ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to // avoid deadlocks due to synchronous balancer state updates. 
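// With ExitIdle promoted onto balancer.Balancer itself, every LB policy now
// has to provide it (the standalone ExitIdler interface is deprecated). A
// hedged sketch of a delegating policy; wrappingBalancer and its child
// wiring are illustrative, not an API of this package, and it assumes
// import "google.golang.org/grpc/balancer".
type wrappingBalancer struct {
	child balancer.Balancer // built from the child policy's balancer.Builder
}

func (b *wrappingBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	return b.child.UpdateClientConnState(s)
}

func (b *wrappingBalancer) ResolverError(err error) { b.child.ResolverError(err) }

// Deprecated in the Balancer interface, but still part of it.
func (b *wrappingBalancer) UpdateSubConnState(sc balancer.SubConn, st balancer.SubConnState) {}

func (b *wrappingBalancer) Close() { b.child.Close() }

// No type assertion on balancer.ExitIdler is needed any more: the child is
// statically required to implement ExitIdle.
func (b *wrappingBalancer) ExitIdle() { b.child.ExitIdle() }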
func (bw *balancerWrapper) ExitIdle() { - if ei, ok := bw.child.(balancer.ExitIdler); ok { - go func() { - bw.es.childMu.Lock() - if !bw.isClosed { - ei.ExitIdle() - } - bw.es.childMu.Unlock() - }() - } + go func() { + bw.es.childMu.Lock() + if !bw.isClosed { + bw.child.ExitIdle() + } + bw.es.childMu.Unlock() + }() } // updateClientConnStateLocked delivers the ClientConnState to the child diff --git a/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 494314f2358..e62047256af 100644 --- a/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -54,18 +54,9 @@ func init() { balancer.Register(pickfirstBuilder{}) } -type ( - // enableHealthListenerKeyType is a unique key type used in resolver - // attributes to indicate whether the health listener usage is enabled. - enableHealthListenerKeyType struct{} - // managedByPickfirstKeyType is an attribute key type to inform Outlier - // Detection that the generic health listener is being used. - // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when - // implementing the dualstack design. This is a hack. Once Dualstack is - // completed, outlier detection will stop sending ejection updates through - // the connectivity listener. - managedByPickfirstKeyType struct{} -) +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} var ( logger = grpclog.Component("pick-first-leaf-lb") @@ -149,17 +140,6 @@ func EnableHealthListener(state resolver.State) resolver.State { return state } -// IsManagedByPickfirst returns whether an address belongs to a SubConn -// managed by the pickfirst LB policy. -// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable -// outlier_detection via the with connectivity listener when using pick_first. -// Once Dualstack changes are complete, all SubConns will be created by -// pick_first and outlier detection will only use the health listener for -// ejection. This hack can then be removed. -func IsManagedByPickfirst(addr resolver.Address) bool { - return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil -} - type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -186,7 +166,6 @@ type scData struct { } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) sd := &scData{ rawConnectivityState: connectivity.Idle, effectiveState: connectivity.Idle, diff --git a/e2e/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/e2e/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 35da5d1ec9d..22045bf3946 100644 --- a/e2e/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/e2e/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -70,10 +70,3 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), }) } - -func (b *rrBalancer) ExitIdle() { - // Should always be ok, as child is endpoint sharding. 
- if ei, ok := b.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - } -} diff --git a/e2e/vendor/google.golang.org/grpc/dialoptions.go b/e2e/vendor/google.golang.org/grpc/dialoptions.go index 050ba0f1611..ec0ca89ccdc 100644 --- a/e2e/vendor/google.golang.org/grpc/dialoptions.go +++ b/e2e/vendor/google.golang.org/grpc/dialoptions.go @@ -213,6 +213,7 @@ func WithReadBufferSize(s int) DialOption { func WithInitialWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -222,6 +223,26 @@ func WithInitialWindowSize(s int32) DialOption { func WithInitialConnWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticStreamWindowSize returns a DialOption which sets the initial +// stream window size to the value provided and disables dynamic flow control. +func WithStaticStreamWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticConnWindowSize returns a DialOption which sets the initial +// connection window size to the value provided and disables dynamic flow +// control. +func WithStaticConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true }) } diff --git a/e2e/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/e2e/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 93136610ec6..f2c01f296ac 100644 --- a/e2e/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/e2e/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -188,13 +188,13 @@ type HealthServer interface { type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") + return nil, status.Error(codes.Unimplemented, "method Check not implemented") } func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") + return nil, status.Error(codes.Unimplemented, "method List not implemented") } func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") + return status.Error(codes.Unimplemented, "method Watch not implemented") } func (UnimplementedHealthServer) testEmbeddedByValue() {} diff --git a/e2e/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/e2e/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index fbc1ca356ab..ba25b898871 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/e2e/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -223,15 +223,7 @@ func (gsb *Balancer) ExitIdle() { // There is no need to protect this read with a mutex, as the write to the // Balancer field happens in SwitchTo, which completes before this can be // called. 
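// A hedged client-side sketch for the new static-window DialOptions above;
// the target and the 1 MiB sizes are illustrative. The plain
// WithInitialWindowSize/WithInitialConnWindowSize now disable dynamic flow
// control implicitly, so these variants mainly make that choice explicit.
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStaticStreamWindowSize(1<<20), // fixed per-stream window
		grpc.WithStaticConnWindowSize(1<<20),   // fixed connection window
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}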
- if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - gsb.mu.Lock() - defer gsb.mu.Unlock() - for sc := range balToUpdate.subconns { - sc.Connect() - } + balToUpdate.ExitIdle() } // updateSubConnState forwards the update to the appropriate child. diff --git a/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index f5f2bdeb864..2fdaed88dbd 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -33,10 +33,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // LeastRequestLB is set if we should support the least_request_experimental - // LB policy, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". - LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", true) // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) diff --git a/e2e/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/e2e/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ef72fbb3a01..a2831e5d01f 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/e2e/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) } +// itemNodePool is used to reduce heap allocations. +var itemNodePool = sync.Pool{ + New: func() any { + return &itemNode{} + }, +} + type itemNode struct { it any next *itemNode @@ -51,7 +58,9 @@ type itemList struct { } func (il *itemList) enqueue(i any) { - n := &itemNode{it: i} + n := itemNodePool.Get().(*itemNode) + n.next = nil + n.it = i if il.tail == nil { il.head, il.tail = n, n return @@ -71,7 +80,9 @@ func (il *itemList) dequeue() any { return nil } i := il.head.it + temp := il.head il.head = il.head.next + itemNodePool.Put(temp) if il.head == nil { il.tail = nil } @@ -146,10 +157,11 @@ type earlyAbortStream struct { func (*earlyAbortStream) isTransportResponseFrame() bool { return false } type dataFrame struct { - streamID uint32 - endStream bool - h []byte - reader mem.Reader + streamID uint32 + endStream bool + h []byte + data mem.BufferSlice + processing bool // onEachWrite is called every time // a part of data is written out. 
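// The itemNodePool above is the standard sync.Pool recycling pattern: take
// a node, overwrite every reused field, and hand it back once dequeued so
// steady-state enqueues stop allocating. A free-standing sketch of the same
// idea (hedged; node/get/put are illustrative, not APIs of this package):
package main

import "sync"

type node struct {
	val  any
	next *node
}

var nodePool = sync.Pool{New: func() any { return &node{} }}

func get(v any) *node {
	n := nodePool.Get().(*node)
	n.val = v // reset every field; pooled objects carry stale state
	n.next = nil
	return n
}

func put(n *node) {
	nodePool.Put(n) // only once no other goroutine can still reach n
}

func main() {
	put(get(42))
}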
onEachWrite func() @@ -234,6 +246,7 @@ type outStream struct { itl *itemList bytesOutStanding int wq *writeQuota + reader mem.Reader next *outStream prev *outStream @@ -461,7 +474,9 @@ func (c *controlBuffer) finish() { v.onOrphaned(ErrConnClosing) } case *dataFrame: - _ = v.reader.Close() + if !v.processing { + v.data.Free() + } } } @@ -650,10 +665,11 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + reader: mem.BufferSlice{}.Reader(), } l.estdStreams[h.streamID] = str } @@ -685,10 +701,11 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { } // Case 2: Client wants to originate stream. str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + reader: mem.BufferSlice{}.Reader(), } return l.originateStream(str, h) } @@ -790,10 +807,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // a RST_STREAM before stream initialization thus the stream might // not be established yet. delete(l.estdStreams, c.streamID) + str.reader.Close() str.deleteSelf() for head := str.itl.dequeueAll(); head != nil; head = head.next { if df, ok := head.it.(*dataFrame); ok { - _ = df.reader.Close() + if !df.processing { + df.data.Free() + } } } } @@ -928,7 +948,13 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } + reader := str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + if !dataItem.processing { + dataItem.processing = true + str.reader.Reset(dataItem.data) + dataItem.data.Free() + } // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. // Every dataFrame has two buffers; h that keeps grpc-message header and data @@ -936,13 +962,13 @@ func (l *loopyWriter) processData() (bool, error) { // from data is copied to h to make as big as the maximum possible HTTP2 frame // size. - if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame + if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = dataItem.reader.Close() + _ = reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. 
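// Hedged reading of the dataFrame lifecycle in this file: write() takes an
// extra reference on the BufferSlice (data.Ref()) before queueing the
// frame; the first time processData sees the frame it sets
// dataItem.processing, moves the slice into the long-lived per-stream
// reader via Reset (which takes its own ref), and immediately Frees the
// frame's reference. The processing flag is what keeps finish() and
// cleanupStreamHandler() from freeing a slice the reader already owns.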
@@ -971,8 +997,8 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, dataItem.reader.Remaining()) - remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + dSize := min(maxSize-hSize, reader.Remaining()) + remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize var buf *[]byte @@ -993,7 +1019,7 @@ func (l *loopyWriter) processData() (bool, error) { defer pool.Put(buf) copy((*buf)[:hSize], dataItem.h) - _, _ = dataItem.reader.Read((*buf)[hSize:]) + _, _ = reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1014,7 +1040,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = dataItem.reader.Close() + _ = reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/e2e/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/e2e/vendor/google.golang.org/grpc/internal/transport/http2_client.go index ef56592b944..5467fe9715a 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/e2e/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -309,11 +309,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts scheme = "https" } } - dynamicWindow := true icwz := int32(initialWindowSize) if opts.InitialConnWindowSize >= defaultWindowSize { icwz = opts.InitialConnWindowSize - dynamicWindow = false } writeBufSize := opts.WriteBufferSize readBufSize := opts.ReadBufferSize @@ -381,9 +379,8 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false } - if dynamicWindow { + if !opts.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -1091,32 +1088,29 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { - reader := data.Reader() - if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { - _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { - _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - reader: reader, + data: data, } - if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota. 
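// Ownership handoff in write() below (hedged): the caller keeps its own
// reference to `data` and remains free to release it after write() returns;
// the transport pins the slice with data.Ref() right before controlBuf.put,
// and that reference is dropped either by the loopy writer once the frame
// is processed or by data.Free() on the failure path. The per-call
// reader.Close() bookkeeping of the old code disappears because the reader
// is now owned by the stream.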
+ if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return err } } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } t.incrMsgSent() diff --git a/e2e/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/e2e/vendor/google.golang.org/grpc/internal/transport/http2_server.go index e4c3731bdb0..9f725e15a81 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/e2e/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -132,6 +132,10 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen logger *grpclog.PrefixLogger + // setResetPingStrikes is stored as a closure instead of making this a + // method on http2Server to avoid a heap allocation when converting a method + // to a closure for passing to frames objects. + setResetPingStrikes func() } // NewServerTransport creates a http2 transport with conn and configuration @@ -176,16 +180,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, Val: config.MaxStreams, }) } - dynamicWindow := true iwz := int32(initialWindowSize) if config.InitialWindowSize >= defaultWindowSize { iwz = config.InitialWindowSize - dynamicWindow = false } icwz := int32(initialWindowSize) if config.InitialConnWindowSize >= defaultWindowSize { icwz = config.InitialConnWindowSize - dynamicWindow = false } if iwz != defaultWindowSize { isettings = append(isettings, http2.Setting{ @@ -266,6 +267,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, initialWindowSize: iwz, bufferPool: config.BufferPool, } + t.setResetPingStrikes = func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { czSecurity = au.GetSecurityValue() @@ -285,7 +289,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) - if dynamicWindow { + if !config.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -596,10 +600,25 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade return nil } } + + if s.ctx.Err() != nil { + t.mu.Unlock() + // Early abort in case the timeout was zero or so low it already fired. + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusOK, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()), + rst: !frame.StreamEnded(), + }) + return nil + } + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } + // Start a timer to close the stream on reaching the deadline. if timeoutSet { // We need to wait for s.cancel to be updated before calling @@ -1016,10 +1035,6 @@ func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { return nil } -func (t *http2Server) setResetPingStrikes() { - atomic.StoreUint32(&t.resetPingStrikes, 1) -} - func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. @@ -1132,17 +1147,13 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. 
Non-nil error // is returned if it fails (e.g., framing error, transport error). func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { - reader := data.Reader() - if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.writeHeader(s, nil); err != nil { - _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - _ = reader.Close() return t.streamContextErr(s) } } @@ -1150,15 +1161,16 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ df := &dataFrame{ streamID: s.id, h: hdr, - reader: reader, + data: data, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return t.streamContextErr(s) } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } t.incrMsgSent() diff --git a/e2e/vendor/google.golang.org/grpc/internal/transport/http_util.go b/e2e/vendor/google.golang.org/grpc/internal/transport/http_util.go index 607d2c4cee0..e3663f87f39 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/e2e/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -200,9 +200,6 @@ func decodeTimeout(s string) (time.Duration, error) { if err != nil { return 0, err } - if t == 0 { - return 0, fmt.Errorf("transport: timeout must be positive: %q", s) - } const maxHours = math.MaxInt64 / uint64(time.Hour) if d == time.Hour && t > maxHours { // This timeout would overflow math.MaxInt64; clamp it. diff --git a/e2e/vendor/google.golang.org/grpc/internal/transport/transport.go b/e2e/vendor/google.golang.org/grpc/internal/transport/transport.go index 1730a639f9d..7dd53e80a75 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/e2e/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -466,6 +466,7 @@ type ServerConfig struct { MaxHeaderListSize *uint32 HeaderTableSize *uint32 BufferPool mem.BufferPool + StaticWindowSize bool } // ConnectOptions covers all relevant options for communicating with the server. @@ -504,6 +505,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // The mem.BufferPool to use when reading/writing to the wire. BufferPool mem.BufferPool + // StaticWindowSize controls whether dynamic window sizing is enabled. + StaticWindowSize bool } // WriteOptions provides additional hints and information for message diff --git a/e2e/vendor/google.golang.org/grpc/mem/buffer_slice.go b/e2e/vendor/google.golang.org/grpc/mem/buffer_slice.go index 65002e2cc85..af510d20c5a 100644 --- a/e2e/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/e2e/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -137,6 +137,9 @@ type Reader interface { Close() error // Remaining returns the number of unread bytes remaining in the slice. Remaining() int + // Reset frees the currently held buffer slice and starts reading from the + // provided slice. This allows reusing the reader object.
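+	//
+	// A minimal usage sketch (a and b are assumed BufferSlice values used
+	// for illustration, not part of this API):
+	//
+	//	r := a.Reader() // r holds a reference to a
+	//	r.Reset(b)      // frees a; r now reads b from the start
+	//	_ = r.Close()   // releases b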
+ Reset(s BufferSlice) } type sliceReader struct { @@ -150,6 +153,14 @@ func (r *sliceReader) Remaining() int { return r.len } +func (r *sliceReader) Reset(s BufferSlice) { + r.data.Free() + s.Ref() + r.data = s + r.len = s.Len() + r.bufferIdx = 0 +} + func (r *sliceReader) Close() error { r.data.Free() r.data = nil diff --git a/e2e/vendor/google.golang.org/grpc/server.go b/e2e/vendor/google.golang.org/grpc/server.go index 976e70ae068..70fe23f5502 100644 --- a/e2e/vendor/google.golang.org/grpc/server.go +++ b/e2e/vendor/google.golang.org/grpc/server.go @@ -179,6 +179,7 @@ type serverOptions struct { numServerWorkers uint32 bufferPool mem.BufferPool waitForHandlers bool + staticWindowSize bool } var defaultServerOptions = serverOptions{ @@ -279,6 +280,7 @@ func ReadBufferSize(s int) ServerOption { func InitialWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialWindowSize = s + o.staticWindowSize = true }) } @@ -287,6 +289,29 @@ func InitialWindowSize(s int32) ServerOption { func InitialConnWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialConnWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticStreamWindowSize returns a ServerOption to set the initial stream +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. +func StaticStreamWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticConnWindowSize returns a ServerOption to set the initial connection +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. 
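+//
+// A minimal usage sketch (the window sizes are illustrative assumptions):
+//
+//	srv := grpc.NewServer(
+//		grpc.StaticStreamWindowSize(1<<20), // fixed 1 MiB per-stream window
+//		grpc.StaticConnWindowSize(4<<20),   // fixed 4 MiB per-connection window
+//	)
+//
+// With either Static option set, the transport does not create a BDP
+// estimator and the windows stay fixed for the lifetime of the server's
+// connections.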
+func StaticConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + o.staticWindowSize = true }) } @@ -986,6 +1011,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, BufferPool: s.opts.bufferPool, + StaticWindowSize: s.opts.staticWindowSize, } st, err := transport.NewServerTransport(c, config) if err != nil { diff --git a/e2e/vendor/google.golang.org/grpc/stream.go b/e2e/vendor/google.golang.org/grpc/stream.go index d58bb6471a8..ca6948926f9 100644 --- a/e2e/vendor/google.golang.org/grpc/stream.go +++ b/e2e/vendor/google.golang.org/grpc/stream.go @@ -1171,7 +1171,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } else if err != nil { return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + return status.Errorf(codes.Internal, "cardinality violation: expected for non server-streaming RPCs, but received another message") } func (a *csAttempt) finish(err error) { @@ -1495,7 +1495,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { } else if err != nil { return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + return status.Errorf(codes.Internal, "cardinality violation: expected for non server-streaming RPCs, but received another message") } func (as *addrConnStream) finish(err error) { diff --git a/e2e/vendor/google.golang.org/grpc/version.go b/e2e/vendor/google.golang.org/grpc/version.go index bd82673dc9f..8b0e5f973d6 100644 --- a/e2e/vendor/google.golang.org/grpc/version.go +++ b/e2e/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.73.0" +const Version = "1.74.2" diff --git a/e2e/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/e2e/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index e942bc983ee..743bfb81d6c 100644 --- a/e2e/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/e2e/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) { func SizeVarint(v uint64) int { // This computes 1 + (bits.Len64(v)-1)/7. // 9/64 is a good enough approximation of 1/7 - return int(9*uint32(bits.Len64(v))+64) / 64 + // + // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT + // instruction, which is very fast on CPUs from the last few years. The + // specific way of expressing the calculation matches C++ Protobuf, see + // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang + // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell). + + // By OR'ing v with 1, we guarantee that v is never 0, without changing the + // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler + // needs to add extra instructions to handle that case. + // + // The Go compiler currently (go1.24.4) does not make use of this knowledge. + // This opportunity (removing the XOR instruction, which handles the 0 case) + // results in a small (1%) performance win across CPU architectures. + // + // Independently of avoiding the 0 case, we need the v |= 1 line because + // it allows the Go compiler to eliminate an extra XCHGL barrier. + v |= 1 + + // It would be clearer to write log2value := 63 - uint32(...), but + // writing uint32(...) 
^ 63 is much more efficient (-14% ARM, -20% Intel). + // Proof of identity for our value range [0..63]: + // https://go.dev/play/p/Pdn9hEWYakX + log2value := uint32(bits.LeadingZeros64(v)) ^ 63 + return int((log2value*9 + (64 + 9)) / 64) } // AppendFixed32 appends v to b as a little-endian uint32. diff --git a/e2e/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/e2e/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 323829da1477e4496d664b2a1092a9f9cec275d4..04696351eeeef14cbbd69fd1f4250530b1fbfd56 100644 GIT binary patch literal 154 zcmX}mI}(5(3Eat$;}$;v literal 146 zcmX}mF%Ezr3X5(&e%rBRTLK{CjOa+)E@2mYkk=mEF7 B6)FG# diff --git a/e2e/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/e2e/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index b08b71830c6..a0aad2777f3 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -72,6 +72,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { case genid.FeatureSet_EnforceNamingStyle_field_number: // EnforceNamingStyle is enforced in protoc, languages other than C++ // are not supposed to do anything with this feature. + case genid.FeatureSet_DefaultSymbolVisibility_field_number: + // DefaultSymbolVisibility is enforced in protoc, runtimes should not + // inspect this value. default: panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) } diff --git a/e2e/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/e2e/vendor/google.golang.org/protobuf/internal/filedesc/presence.go new file mode 100644 index 00000000000..a12ec9791cb --- /dev/null +++ b/e2e/vendor/google.golang.org/protobuf/internal/filedesc/presence.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import "google.golang.org/protobuf/reflect/protoreflect" + +// UsePresenceForField reports whether the presence bitmap should be used for +// the specified field. +func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneof fields never use the presence bitmap. + // + // Synthetic oneofs are an exception: Those are used to implement proto3 + // optional fields and hence should follow non-oneof field semantics. + return false, false + + case fd.IsMap(): + // Map-typed fields never use the presence bitmap. + return false, false + + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + // Lazy fields always use the presence bitmap (only messages can be lazy). + isLazy := fd.(interface{ IsLazy() bool }).IsLazy() + return isLazy, isLazy + + default: + // If the field has presence, use the presence bitmap. + return fd.HasPresence(), false + } +} diff --git a/e2e/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/e2e/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 39524782add..950a6a325a4 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -34,6 +34,19 @@ const ( Edition_EDITION_MAX_enum_value = 2147483647 ) +// Full and short names for google.protobuf.SymbolVisibility. 
+const ( + SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility" + SymbolVisibility_enum_name = "SymbolVisibility" +) + +// Enum values for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_VISIBILITY_UNSET_enum_value = 0 + SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1 + SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -65,6 +78,7 @@ const ( FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency" FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" FileDescriptorProto_Service_field_name protoreflect.Name = "service" @@ -79,6 +93,7 @@ const ( FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency" FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" @@ -96,6 +111,7 @@ const ( FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15 FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 @@ -124,6 +140,7 @@ const ( DescriptorProto_Options_field_name protoreflect.Name = "options" DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + DescriptorProto_Visibility_field_name protoreflect.Name = "visibility" DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" @@ -135,6 +152,7 @@ const ( DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" + DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility" ) // Field numbers for 
google.protobuf.DescriptorProto. @@ -149,6 +167,7 @@ const ( DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 + DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11 ) // Names for google.protobuf.DescriptorProto.ExtensionRange. @@ -388,12 +407,14 @@ const ( EnumDescriptorProto_Options_field_name protoreflect.Name = "options" EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility" EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" + EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility" ) // Field numbers for google.protobuf.EnumDescriptorProto. @@ -403,6 +424,7 @@ const ( EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 + EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. @@ -1008,32 +1030,35 @@ const ( // Field names for google.protobuf.FeatureSet. 
const ( - FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" - FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" - FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" - FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" - FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" - FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" - FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" - - FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" - FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" - FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" - FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" - FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" - FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" - FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility" ) // Field numbers for google.protobuf.FeatureSet. 
const ( - FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 - FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 - FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 - FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 - FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 - FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 - FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1128,6 +1153,27 @@ const ( FeatureSet_STYLE_LEGACY_enum_value = 2 ) +// Names for google.protobuf.FeatureSet.VisibilityFeature. +const ( + FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature" + FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature" +) + +// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility" + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility" +) + +// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0 + FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1 + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2 + FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3 + FeatureSet_VisibilityFeature_STRICT_enum_value = 4 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/e2e/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/e2e/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 41c1f74ef81..bdad12a9bbc 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf // permit us to skip over definitely-unset fields at marshal time. 
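	// In brief, the helper above reports (usePresence, canBeLazy): real
	// (non-synthetic) oneof members and map fields never use the presence
	// bitmap; message/group fields use it exactly when they are lazy (only
	// messages can be lazy); every other field follows fd.HasPresence().
	// E.g. a proto3 implicit-presence scalar yields (false, false), while a
	// lazy message field yields (true, true).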
var hasPresence bool - hasPresence, cf.isLazy = usePresenceForField(si, fd) + hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd) if hasPresence { cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) diff --git a/e2e/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/e2e/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index dd55e8e009c..5a439daacb7 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -11,6 +11,7 @@ import ( "strings" "sync/atomic" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] var fi fieldInfo - usePresence, _ := usePresenceForField(si, fd) + usePresence, _ := filedesc.UsePresenceForField(fd) switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): @@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return false } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return false } - rv := sp.AsValueOf(fs.Type.Elem()) return rv.Elem().Len() > 0 }, clear: func(p pointer) { - sp := p.Apply(fieldOffset).AtomicGetPointer() - if !sp.IsNil() { - rv := sp.AsValueOf(fs.Type.Elem()) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if !rv.IsNil() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } }, @@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return conv.Zero() } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return conv.Zero() } - rv := sp.AsValueOf(fs.Type.Elem()) if rv.Elem().Len() == 0 { return conv.Zero() } @@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) { func (mi *MessageInfo) present(p pointer, index uint32) bool { return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) } - -// usePresenceForField implements the somewhat intricate logic of when -// the presence bitmap is used for a field. The main logic is that a -// field that is optional or that can be lazy will use the presence -// bit, but for proto2, also maps have a presence bit. It also records -// if the field can ever be lazy, which is true if we have a -// lazyOffset and the field is a message or a slice of messages. A -// field that is lazy will always need a presence bit. Oneofs are not -// lazy and do not use presence, unless they are a synthetic oneof, -// which is a proto3 optional field. For proto3 optionals, we use the -// presence and they can also be lazy when applicable (a message). -func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { - hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() - - // Non-oneof scalar fields with explicit field presence use the presence array. 
- usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) - switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - return false, false - case fd.IsMap(): - return false, false - case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: - return hasLazyField, hasLazyField - default: - return usesPresenceArray || (hasLazyField && fd.HasPresence()), false - } -} diff --git a/e2e/vendor/google.golang.org/protobuf/internal/impl/presence.go b/e2e/vendor/google.golang.org/protobuf/internal/impl/presence.go index 914cb1deda2..443afe81cda 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/impl/presence.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) { // Present checks for the presence of a specific field number in a presence set. func (p presence) Present(num uint32) bool { - if p.P == nil { - return false - } return Export{}.Present(p.toElem(num), num) } diff --git a/e2e/vendor/google.golang.org/protobuf/internal/version/version.go b/e2e/vendor/google.golang.org/protobuf/internal/version/version.go index aac1cb18a74..a53364c5992 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 6 + Patch = 7 PreRelease = "" ) diff --git a/e2e/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/e2e/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index a4a0a2971dd..730331e6668 100644 --- a/e2e/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/e2e/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "public_dependency", nil) case 11: b = p.appendRepeatedField(b, "weak_dependency", nil) + case 15: + b = p.appendRepeatedField(b, "option_dependency", nil) case 4: b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) case 5: @@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) case 10: b = p.appendRepeatedField(b, "reserved_name", nil) + case 11: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) case 5: b = p.appendRepeatedField(b, "reserved_name", nil) + case 6: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -400,6 +406,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "json_format", nil) case 7: b = p.appendSingularField(b, "enforce_naming_style", nil) + case 8: + b = p.appendSingularField(b, "default_symbol_visibility", nil) } return b } diff --git a/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 7fe280f194c..6843b0beafa 100644 --- a/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -151,6 +151,70 @@ func 
(Edition) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} } +// Describes the 'visibility' of a symbol with respect to the proto import +// system. Symbols can only be imported when the visibility rules do not prevent +// it (ex: local symbols cannot be imported). Visibility modifiers can only set +// on `message` and `enum` as they are the only types available to be referenced +// from other files. +type SymbolVisibility int32 + +const ( + SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0 + SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1 + SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2 +) + +// Enum value maps for SymbolVisibility. +var ( + SymbolVisibility_name = map[int32]string{ + 0: "VISIBILITY_UNSET", + 1: "VISIBILITY_LOCAL", + 2: "VISIBILITY_EXPORT", + } + SymbolVisibility_value = map[string]int32{ + "VISIBILITY_UNSET": 0, + "VISIBILITY_LOCAL": 1, + "VISIBILITY_EXPORT": 2, + } +) + +func (x SymbolVisibility) Enum() *SymbolVisibility { + p := new(SymbolVisibility) + *p = x + return p +} + +func (x SymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (SymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x SymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *SymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = SymbolVisibility(num) + return nil +} + +// Deprecated: Use SymbolVisibility.Descriptor instead. +func (SymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + // The verification state of the extension range. 
type ExtensionRangeOptions_VerificationState int32 @@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() 
protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[10] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string { } func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() } func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[10] + return &file_google_protobuf_descriptor_proto_enumTypes[11] } func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { @@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string { } func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() } func (FeatureSet_EnumType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[11] + return &file_google_protobuf_descriptor_proto_enumTypes[12] } func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { @@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string { } func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() } func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[12] + return &file_google_protobuf_descriptor_proto_enumTypes[13] } func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { @@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string { } func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() + return 
file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() } func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[13] + return &file_google_protobuf_descriptor_proto_enumTypes[14] } func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { @@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string { } func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() } func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[14] + return &file_google_protobuf_descriptor_proto_enumTypes[15] } func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { @@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string { } func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[15] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { @@ -1172,11 +1236,11 @@ func (x FeatureSet_EnforceNamingStyle) String() string { } func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { @@ -1198,6 +1262,77 @@ func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} } +type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32 + +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0 + // Default pre-EDITION_2024, all UNSET visibility are export. + FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1 + // All top-level symbols default to export, nested default to local. + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2 + // All symbols default to local. + FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3 + // All symbols local by default. Nested types cannot be exported. + // With special case caveat for message { enum {} reserved 1 to max; } + // This is the recommended setting for new protos. + FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4 +) + +// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility. 
+var ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{ + 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN", + 1: "EXPORT_ALL", + 2: "EXPORT_TOP_LEVEL", + 3: "LOCAL_ALL", + 4: "STRICT", + } + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{ + "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0, + "EXPORT_ALL": 1, + "EXPORT_TOP_LEVEL": 2, + "LOCAL_ALL": 3, + "STRICT": 4, + } +) + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility) + *p = x + return p +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor() +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[18] +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num) + return nil +} + +// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead. +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1236,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[17] + return &file_google_protobuf_descriptor_proto_enumTypes[19] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1321,6 +1456,9 @@ type FileDescriptorProto struct { // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // Names of files imported by this file purely for the purpose of providing + // option extensions. These are excluded from the dependency list above. + OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"` // All top-level definitions in this file. 
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` @@ -1414,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 { return nil } +func (x *FileDescriptorProto) GetOptionDependency() []string { + if x != nil { + return x.OptionDependency + } + return nil +} + func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { if x != nil { return x.MessageType @@ -1484,7 +1629,9 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1589,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string { return nil } +func (x *DescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + type ExtensionRangeOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. @@ -1901,7 +2055,9 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1971,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string { return nil } +func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + // Describes a value within an enum. type EnumValueDescriptorProto struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -3392,17 +3555,18 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. 
type FeatureSet struct { - state protoimpl.MessageState `protogen:"open.v1"` - FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` - EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` - RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` - Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` - MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` - JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` - EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` + DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { @@ -3484,6 +3648,13 @@ func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN } +func (x *FeatureSet) 
GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + if x != nil && x.DefaultSymbolVisibility != nil { + return *x.DefaultSymbolVisibility + } + return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. The resolution with this object becomes a simple search @@ -4144,6 +4315,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +type FeatureSet_VisibilityFeature struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FeatureSet_VisibilityFeature) Reset() { + *x = FeatureSet_VisibilityFeature{} + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FeatureSet_VisibilityFeature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet_VisibilityFeature) ProtoMessage() {} + +func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead. +func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + // A map from every known edition with a unique set of defaults to its // defaults. Not all editions may be contained here. 
For a given edition, // the defaults at the closest matching edition ordered at or before it should @@ -4161,7 +4368,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4173,7 +4380,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4309,7 +4516,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4321,7 +4528,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4393,7 +4600,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4405,7 +4612,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4462,7 +4669,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\n" + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + "\x11FileDescriptorSet\x128\n" + - "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" + "\x13FileDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + @@ -4471,7 +4678,8 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "dependency\x12+\n" + "\x11public_dependency\x18\n" + " \x03(\x05R\x10publicDependency\x12'\n" + - "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" + + "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" + "\fmessage_type\x18\x04 \x03(\v2 
.google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + @@ -4479,7 +4687,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + - "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" + "\x0fDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + @@ -4493,7 +4701,10 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + "\rreserved_name\x18\n" + - " \x03(\tR\freservedName\x1az\n" + + " \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1az\n" + "\x0eExtensionRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + @@ -4562,13 +4773,16 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + "\x14OneofDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + - "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" + "\x13EnumDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + - "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1a;\n" + "\x11EnumReservedRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + @@ -4728,7 +4942,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + "\bNamePart\x12\x1b\n" + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + - "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" + "\n" + "FeatureSet\x12\x91\x01\n" + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + @@ -4739,7 +4953,18 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + 
"\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + "jsonFormat\x12\xab\x01\n" + - "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" + + "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" + + "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" + + "\x11VisibilityFeature\"\x81\x01\n" + + "\x17DefaultSymbolVisibility\x12%\n" + + "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" + + "\n" + + "EXPORT_ALL\x10\x01\x12\x14\n" + + "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" + + "\tLOCAL_ALL\x10\x03\x12\n" + + "\n" + + "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" + "\rFieldPresence\x12\x1a\n" + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + "\bEXPLICIT\x10\x01\x12\f\n" + @@ -4817,7 +5042,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + - "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" + + "\x10SymbolVisibility\x12\x14\n" + + "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" + + "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" + + "\x11VISIBILITY_EXPORT\x10\x02B~\n" + "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( @@ -4832,145 +5061,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34) var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: 
google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle - (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 28: google.protobuf.FileOptions - (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility + (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 3: 
google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 11: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat + (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle + (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 30: google.protobuf.FileOptions + (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault + 
(*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart + (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: 
google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> 
google.protobuf.MethodOptions.IdempotencyLevel - 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle - 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 78, // [78:78] is the sub-list for method output_type - 78, // [78:78] is the sub-list for method input_type - 78, // [78:78] is the sub-list for extension type_name - 78, // [78:78] is the sub-list for extension extendee - 0, // [0:78] is the sub-list for field type_name + 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> 
google.protobuf.DescriptorProto.ExtensionRange + 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 5, // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name 
-> google.protobuf.UninterpretedOption + 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> 
google.protobuf.Edition + 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4983,8 +5218,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 18, - NumMessages: 33, + NumEnums: 20, + NumMessages: 34, NumExtensions: 0, NumServices: 0, }, diff --git a/e2e/vendor/k8s.io/component-helpers/resource/helpers.go b/e2e/vendor/k8s.io/component-helpers/resource/helpers.go index 780db542451..7ff5bef111d 100644 --- a/e2e/vendor/k8s.io/component-helpers/resource/helpers.go +++ b/e2e/vendor/k8s.io/component-helpers/resource/helpers.go @@ -404,7 +404,12 @@ func maxResourceList(list, newList v1.ResourceList) { // max returns the result of max(a, b...) for each named resource and is only used if we can't // accumulate into an existing resource list func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList { - result := a.DeepCopy() + var result v1.ResourceList + if a != nil { + result = a.DeepCopy() + } else { + result = v1.ResourceList{} + } for _, other := range b { maxResourceList(result, other) } diff --git a/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 7574ceaf6b2..dae697066ba 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -129,13 +129,17 @@ func readFromURL(url string, writer io.Writer) error { var ( initRegistry = RegistryList{ - GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling", - PromoterE2eRegistry: "registry.k8s.io/e2e-test-images", - BuildImageRegistry: "registry.k8s.io/build-image", - InvalidRegistry: "invalid.registry.k8s.io/invalid", - GcEtcdRegistry: "registry.k8s.io", - GcRegistry: "registry.k8s.io", - SigStorageRegistry: "registry.k8s.io/sig-storage", + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate GcAuthenticatedRegistry. + GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling", + PromoterE2eRegistry: "registry.k8s.io/e2e-test-images", + BuildImageRegistry: "registry.k8s.io/build-image", + InvalidRegistry: "invalid.registry.k8s.io/invalid", + GcEtcdRegistry: "registry.k8s.io", + GcRegistry: "registry.k8s.io", + SigStorageRegistry: "registry.k8s.io/sig-storage", + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate PrivateRegistry. 
PrivateRegistry: "gcr.io/k8s-authenticated-test", DockerLibraryRegistry: "docker.io/library", CloudProviderGcpRegistry: "registry.k8s.io/cloud-provider-gcp", @@ -152,15 +156,17 @@ const ( // Agnhost image Agnhost // AgnhostPrivate image + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate this. AgnhostPrivate // APIServer image APIServer // AppArmorLoader image AppArmorLoader // AuthenticatedAlpine image + // TODO: https://github.com/kubernetes/kubernetes/issues/130271 + // Eliminate this. AuthenticatedAlpine - // AuthenticatedWindowsNanoServer image - AuthenticatedWindowsNanoServer // BusyBox image BusyBox // DistrolessIptables Image @@ -219,11 +225,10 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.53"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} - configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.29.2"} configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"} configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.6"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.7"} configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.21-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} @@ -270,7 +275,7 @@ func GetMappedImageConfigs(originalImageConfigs map[ImageID]Config, repo string) for i, config := range originalImageConfigs { switch i { case InvalidRegistryImage, AuthenticatedAlpine, - AuthenticatedWindowsNanoServer, AgnhostPrivate: + AgnhostPrivate: // These images are special and can't be run out of the cloud - some because they // are authenticated, and others because they are not real images. Tests that depend // on these images can't be run without access to the public internet. 
diff --git a/e2e/vendor/modules.txt b/e2e/vendor/modules.txt index 6066cbeae58..d489e322d47 100644 --- a/e2e/vendor/modules.txt +++ b/e2e/vendor/modules.txt @@ -1,4 +1,4 @@ -# cel.dev/expr v0.23.0 +# cel.dev/expr v0.24.0 ## explicit; go 1.22.0 cel.dev/expr # github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab @@ -24,7 +24,7 @@ github.com/blang/semver/v4 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 # github.com/ceph/ceph-csi v2.0.1+incompatible => ../ -## explicit; go 1.24.0 +## explicit; go 1.24.4 github.com/ceph/ceph-csi/internal/util/cryptsetup github.com/ceph/ceph-csi/internal/util/file github.com/ceph/ceph-csi/internal/util/log @@ -330,8 +330,8 @@ github.com/opencontainers/selinux/pkg/pwalkdir # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.22.0 -## explicit; go 1.22 +# github.com/prometheus/client_golang v1.23.0 +## explicit; go 1.23.0 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus @@ -342,15 +342,15 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint github.com/prometheus/client_golang/prometheus/testutil/promlint/validations -# github.com/prometheus/client_model v0.6.1 -## explicit; go 1.19 +# github.com/prometheus/client_model v0.6.2 +## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.62.0 -## explicit; go 1.21 +# github.com/prometheus/common v0.65.0 +## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model -# github.com/prometheus/procfs v0.15.1 -## explicit; go 1.20 +# github.com/prometheus/procfs v0.16.1 +## explicit; go 1.23.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -383,14 +383,13 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/attribute/internal go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes -go.opentelemetry.io/otel/internal -go.opentelemetry.io/otel/internal/attribute go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation @@ -408,21 +407,21 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/metric v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/sdk v1.36.0 +## explicit; go 
1.23.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/trace v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry @@ -439,7 +438,7 @@ go.uber.org/automaxprocs go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs -# golang.org/x/crypto v0.40.0 +# golang.org/x/crypto v0.41.0 ## explicit; go 1.23.0 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -452,7 +451,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.42.0 +# golang.org/x/net v0.43.0 ## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/html @@ -468,24 +467,24 @@ golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.29.0 +# golang.org/x/oauth2 v0.30.0 ## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.16.0 ## explicit; go 1.23.0 golang.org/x/sync/singleflight -# golang.org/x/sys v0.34.0 +# golang.org/x/sys v0.35.0 ## explicit; go 1.23.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.33.0 +# golang.org/x/term v0.34.0 ## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.27.0 +# golang.org/x/text v0.28.0 ## explicit; go 1.23.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -518,20 +517,20 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.34.0 +# golang.org/x/tools v0.35.0 ## explicit; go 1.23.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/edge golang.org/x/tools/go/ast/inspector -# google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 +# google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.73.0 +# google.golang.org/grpc v1.74.2 ## explicit; go 1.23.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -595,7 +594,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.6 +# google.golang.org/protobuf v1.36.7 ## explicit; go 1.22 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson @@ -649,7 +648,7 @@ gopkg.in/inf.v0 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.33.3 => k8s.io/api v0.33.2 +# k8s.io/api v0.33.4 => k8s.io/api v0.33.2 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -716,7 +715,7 @@ k8s.io/api/storagemigration/v1alpha1 
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.33.3 +# k8s.io/apimachinery v0.33.4 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -787,7 +786,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.33.3 +# k8s.io/apiserver v0.33.4 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1197,11 +1196,11 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.33.3 +# k8s.io/cloud-provider v0.33.4 ## explicit; go 1.24.0 k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.33.3 +# k8s.io/component-base v0.33.4 ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility @@ -1225,7 +1224,7 @@ k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/zpages/features -# k8s.io/component-helpers v0.33.3 +# k8s.io/component-helpers v0.33.4 ## explicit; go 1.24.0 k8s.io/component-helpers/node/topology k8s.io/component-helpers/node/util/sysctl @@ -1234,7 +1233,7 @@ k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/ephemeral k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.33.3 +# k8s.io/controller-manager v0.33.4 ## explicit; go 1.24.0 k8s.io/controller-manager/pkg/features # k8s.io/cri-api v0.33.2 @@ -1304,7 +1303,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.33.3 +# k8s.io/kubernetes v1.33.4 ## explicit; go 1.24.0 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1463,10 +1462,10 @@ k8s.io/kubernetes/test/utils/kubeconfig k8s.io/kubernetes/third_party/forked/golang/expansion k8s.io/kubernetes/third_party/forked/libcontainer/apparmor k8s.io/kubernetes/third_party/forked/libcontainer/utils -# k8s.io/mount-utils v0.33.1 +# k8s.io/mount-utils v0.33.3 ## explicit; go 1.24.0 k8s.io/mount-utils -# k8s.io/pod-security-admission v0.33.3 +# k8s.io/pod-security-admission v0.33.4 ## explicit; go 1.24.0 k8s.io/pod-security-admission/api k8s.io/pod-security-admission/policy From 02da57e5c47791a4b07e384a5e8483ee025f4080 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 13:41:33 +0000 Subject: [PATCH 035/157] rebase: bump google.golang.org/grpc from 1.74.2 to 1.75.0 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.74.2 to 1.75.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.74.2...v1.75.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-version: 1.75.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 12 +- go.sum | 34 +- .../go-jose/go-jose/v4/CHANGELOG.md | 5 + vendor/github.com/go-jose/go-jose/v4/jwe.go | 19 +- vendor/github.com/go-jose/go-jose/v4/jwk.go | 2 +- vendor/github.com/go-jose/go-jose/v4/jws.go | 74 +- .../go-jose/go-jose/v4/symmetric.go | 12 +- .../go-jose/go-jose/v4/symmetric_go124.go | 28 + .../go-jose/go-jose/v4/symmetric_legacy.go | 29 + .../go.opentelemetry.io/otel/.clomonitor.yml | 3 + vendor/go.opentelemetry.io/otel/.golangci.yml | 6 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 60 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 17 +- vendor/go.opentelemetry.io/otel/Makefile | 2 +- vendor/go.opentelemetry.io/otel/README.md | 1 + vendor/go.opentelemetry.io/otel/RELEASING.md | 23 + .../otel/dependencies.Dockerfile | 4 +- .../otel/semconv/v1.34.0/MIGRATION.md | 4 + .../otel/semconv/v1.34.0/README.md | 3 + .../otel/semconv/v1.34.0/attribute_group.go | 13851 ++++++++++++++++ .../otel/semconv/v1.34.0/doc.go | 9 + .../otel/semconv/v1.34.0/exception.go | 9 + .../otel/semconv/v1.34.0/schema.go | 9 + vendor/go.opentelemetry.io/otel/trace/auto.go | 2 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 11 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 8 +- .../endpointsharding/endpointsharding.go | 21 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 6 +- vendor/google.golang.org/grpc/clientconn.go | 11 +- .../grpc/credentials/credentials.go | 25 +- .../google.golang.org/grpc/credentials/tls.go | 30 +- vendor/google.golang.org/grpc/dialoptions.go | 2 + .../grpc/internal/envconfig/envconfig.go | 16 +- .../grpc/internal/internal.go | 29 - .../internal/resolver/dns/dns_resolver.go | 20 +- .../google.golang.org/grpc/picker_wrapper.go | 36 +- .../grpc/resolver/resolver.go | 5 + vendor/google.golang.org/grpc/server.go | 1 + vendor/google.golang.org/grpc/stats/stats.go | 20 +- vendor/google.golang.org/grpc/stream.go | 32 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 15 +- 43 files changed, 14326 insertions(+), 184 deletions(-) create mode 100644 vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go create mode 100644 vendor/go.opentelemetry.io/otel/.clomonitor.yml create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go diff --git a/go.mod b/go.mod index 555b56516d2..b6cdf3b7c53 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( golang.org/x/crypto v0.41.0 golang.org/x/net v0.43.0 golang.org/x/sys v0.35.0 - google.golang.org/grpc v1.74.2 + google.golang.org/grpc v1.75.0 google.golang.org/protobuf v1.36.7 k8s.io/api v0.33.3 k8s.io/apimachinery v0.33.3 @@ -86,7 +86,7 @@ require ( github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gemalto/flume v0.13.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer 
v0.21.0 // indirect @@ -136,9 +136,9 @@ require ( github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect @@ -147,7 +147,7 @@ require ( golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index d97d6b1f6d2..d9990b979c8 100644 --- a/go.sum +++ b/go.sum @@ -217,8 +217,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -662,16 +662,16 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= 
-go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1137,6 +1137,8 @@ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1260,8 +1262,8 @@ google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1293,8 +1295,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md index 6f717dbd86e..66a8a0f89a6 100644 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -1,3 +1,8 @@ +## Changed + + - Defined a custom error, ErrUnexpectedSignatureAlgorithm, returned when a JWS + header contains an unsupported signature algorithm. + # v4.0.4 ## Fixed diff --git a/vendor/github.com/go-jose/go-jose/v4/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go index 9f1322dccc9..6102f910005 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go @@ -274,7 +274,7 @@ func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncr if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) { return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms) } - if alg != "" && !containsContentEncryption(contentEncryption, enc) { + if enc != "" && !containsContentEncryption(contentEncryption, enc) { return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption) } return nil @@ -288,11 +288,20 @@ func ParseEncryptedCompact( keyAlgorithms []KeyAlgorithm, contentEncryption []ContentEncryption, ) (*JSONWebEncryption, error) { - // Five parts is four separators - if strings.Count(input, ".") != 4 { - return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") + var parts [5]string + var ok bool + + for i := range 4 { + parts[i], input, ok = strings.Cut(input, ".") + if !ok { + return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts") + } + } + // Validate that the last part does not contain more dots + if strings.ContainsRune(input, '.') { + return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts") } - parts := strings.SplitN(input, ".", 5) + parts[4] = input rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index 9e57e93ba2e..9700f8906c4 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -239,7 +239,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { keyPub = key } } else { - return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) + return fmt.Errorf("go-jose/go-jose: unknown curve '%s'", raw.Crv) } default: return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) diff --git a/vendor/github.com/go-jose/go-jose/v4/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go index 
d09d8ba5078..c40bd3ec10a 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jws.go +++ b/vendor/github.com/go-jose/go-jose/v4/jws.go @@ -75,7 +75,14 @@ type Signature struct { original *rawSignatureInfo } -// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. +// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. Validation fails if +// the JWS is signed with an algorithm that isn't in the provided list of signature algorithms. +// Applications should decide for themselves which signature algorithms are acceptable. If you're +// not sure which signature algorithms your application might receive, consult the documentation of +// the program which provides them or the protocol that you are implementing. You can also try +// getting an example JWS and decoding it with a tool like https://jwt.io to see what its "alg" +// header parameter indicates. The signature on the JWS does not get validated during parsing. Call +// Verify() after parsing to validate the signature and obtain the payload. // // https://datatracker.ietf.org/doc/html/rfc7515#section-7 func ParseSigned( @@ -90,7 +97,14 @@ func ParseSigned( return parseSignedCompact(signature, nil, signatureAlgorithms) } -// ParseSignedCompact parses a message in JWS Compact Serialization. +// ParseSignedCompact parses a message in JWS Compact Serialization. Validation fails if the JWS is +// signed with an algorithm that isn't in the provided list of signature algorithms. Applications +// should decide for themselves which signature algorithms are acceptable. If you're not sure which +// signature algorithms your application might receive, consult the documentation of the program +// which provides them or the protocol that you are implementing. You can also try getting an +// example JWS and decoding it with a tool like https://jwt.io to see what its "alg" header +// parameter indicates. The signature on the JWS does not get validated during parsing. Call +// Verify() after parsing to validate the signature and obtain the payload. // // https://datatracker.ietf.org/doc/html/rfc7515#section-7.1 func ParseSignedCompact( @@ -101,6 +115,15 @@ func ParseSignedCompact( } // ParseDetached parses a signed message in compact serialization format with detached payload. +// Validation fails if the JWS is signed with an algorithm that isn't in the provided list of +// signature algorithms. Applications should decide for themselves which signature algorithms are +// acceptable. If you're not sure which signature algorithms your application might receive, consult +// the documentation of the program which provides them or the protocol that you are implementing. +// You can also try getting an example JWS and decoding it with a tool like https://jwt.io to see +// what its "alg" header parameter indicates. The signature on the JWS does not get validated during +// parsing. Call Verify() after parsing to validate the signature and obtain the payload. +// +// https://datatracker.ietf.org/doc/html/rfc7515#appendix-F func ParseDetached( signature string, payload []byte, @@ -181,6 +204,25 @@ func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureA return false } +// ErrUnexpectedSignatureAlgorithm is returned when the signature algorithm in +// the JWS header does not match one of the expected algorithms. +type ErrUnexpectedSignatureAlgorithm struct { + // Got is the signature algorithm found in the JWS header.
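
The expanded doc comments above prescribe a two-step calling pattern: parse with an explicit allow-list of signature algorithms, then call Verify() to check the signature and obtain the payload. A minimal sketch of that pattern for an RS256-only caller, using the typed error this patch introduces; the function and variable names are illustrative, not part of the change:

```go
package jws

import (
	"crypto/rsa"
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

// verifyToken parses a compact JWS against an explicit algorithm allow-list
// and only then verifies the signature; parsing alone never validates it.
func verifyToken(token string, pub *rsa.PublicKey) ([]byte, error) {
	sig, err := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.RS256})
	if err != nil {
		// The typed error introduced below makes allow-list mismatches
		// programmatically detectable.
		var algErr *jose.ErrUnexpectedSignatureAlgorithm
		if errors.As(err, &algErr) {
			return nil, fmt.Errorf("token signed with unsupported %q: %w", algErr.Got, err)
		}
		return nil, err
	}
	return sig.Verify(pub) // returns the verified payload
}
```

Verify returns the decoded payload on success, so callers should consume its return value rather than reading the payload out of the parsed object before verification.
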
+ Got SignatureAlgorithm + expected []SignatureAlgorithm +} + +func (e *ErrUnexpectedSignatureAlgorithm) Error() string { + return fmt.Sprintf("unexpected signature algorithm %q; expected %q", e.Got, e.expected) +} + +func newErrUnexpectedSignatureAlgorithm(got SignatureAlgorithm, expected []SignatureAlgorithm) error { + return &ErrUnexpectedSignatureAlgorithm{ + Got: got, + expected: expected, + } +} + // sanitized produces a cleaned-up JWS object from the raw JSON. func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) { if len(signatureAlgorithms) == 0 { @@ -236,8 +278,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo alg := SignatureAlgorithm(signature.Header.Algorithm) if !containsSignatureAlgorithm(signatureAlgorithms, alg) { - return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", - alg, signatureAlgorithms) + return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms) } if signature.header != nil { @@ -285,8 +326,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm) if !containsSignatureAlgorithm(signatureAlgorithms, alg) { - return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", - alg, signatureAlgorithms) + return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms) } if obj.Signatures[i].header != nil { @@ -321,35 +361,43 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo return obj, nil } +const tokenDelim = "." + // parseSignedCompact parses a message in compact format. func parseSignedCompact( input string, payload []byte, signatureAlgorithms []SignatureAlgorithm, ) (*JSONWebSignature, error) { - // Three parts is two separators - if strings.Count(input, ".") != 2 { + protected, s, ok := strings.Cut(input, tokenDelim) + if !ok { // no period found + return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") + } + claims, sig, ok := strings.Cut(s, tokenDelim) + if !ok { // only one period found + return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") + } + if strings.ContainsRune(sig, '.') { // too many periods found return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") } - parts := strings.SplitN(input, ".", 3) - if parts[1] != "" && payload != nil { + if claims != "" && payload != nil { return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") } - rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) + rawProtected, err := base64.RawURLEncoding.DecodeString(protected) if err != nil { return nil, err } if payload == nil { - payload, err = base64.RawURLEncoding.DecodeString(parts[1]) + payload, err = base64.RawURLEncoding.DecodeString(claims) if err != nil { return nil, err } } - signature, err := base64.RawURLEncoding.DecodeString(parts[2]) + signature, err := base64.RawURLEncoding.DecodeString(sig) if err != nil { return nil, err } diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go index a69103b084e..6176e060740 100644 --- a/vendor/github.com/go-jose/go-jose/v4/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go @@ -30,8 +30,6 @@ import ( "hash" "io" - "golang.org/x/crypto/pbkdf2" - josecipher "github.com/go-jose/go-jose/v4/cipher" ) @@ -330,7 +328,10 @@ func (ctx 
*symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie // derive key keyLen, h := getPbkdf2Params(alg) - key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h) + key, err := pbkdf2Key(h, string(ctx.key), salt, ctx.p2c, keyLen) + if err != nil { + return recipientInfo{}, err + } // use AES cipher with derived key block, err := aes.NewCipher(key) @@ -432,7 +433,10 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien // derive key keyLen, h := getPbkdf2Params(alg) - key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h) + key, err := pbkdf2Key(h, string(ctx.key), salt, p2c, keyLen) + if err != nil { + return nil, err + } // use AES cipher with derived key block, err := aes.NewCipher(key) diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go b/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go new file mode 100644 index 00000000000..6c5a4e7f202 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go @@ -0,0 +1,28 @@ +//go:build go1.24 + +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/pbkdf2" + "hash" +) + +func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) { + return pbkdf2.Key(h, password, salt, iter, keyLen) +} diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go b/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go new file mode 100644 index 00000000000..bdfc3d76637 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go @@ -0,0 +1,29 @@ +//go:build !go1.24 + +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
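
The pair of build-tagged files here selects a PBKDF2 implementation: crypto/pbkdf2 from the standard library on Go 1.24+, golang.org/x/crypto/pbkdf2 otherwise. The standard-library version can return an error (for example when a restricted mode such as FIPS 140-3 rejects the parameters), which is why the shared pbkdf2Key wrapper carries an error and both call sites in symmetric.go above have to propagate it. A short standalone sketch of the Go 1.24 API, with illustrative parameters:

```go
package main

import (
	"crypto/pbkdf2" // standard library as of Go 1.24
	"crypto/sha256"
	"fmt"
	"log"
)

func main() {
	salt := []byte("0123456789abcdef") // fixed salt for illustration only; use a random salt in practice
	key, err := pbkdf2.Key(sha256.New, "s3cret", salt, 600_000, 32)
	if err != nil {
		log.Fatal(err) // must be surfaced, not silently dropped
	}
	fmt.Printf("derived %d-byte key\n", len(key))
}
```
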
+ */ + +package jose + +import ( + "hash" + + "golang.org/x/crypto/pbkdf2" +) + +func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) { + return pbkdf2.Key([]byte(password), salt, iter, keyLen, h), nil +} diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml new file mode 100644 index 00000000000..128d61a2260 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml @@ -0,0 +1,3 @@ +exemptions: + - check: artifacthub_badge + reason: "Artifact Hub doesn't support Go packages" diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 888e5da8028..5f69cc027c2 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -66,8 +66,6 @@ linters: desc: Do not use cross-module internal packages. - pkg: go.opentelemetry.io/otel/internal/internaltest desc: Do not use cross-module internal packages. - - pkg: go.opentelemetry.io/otel/internal/matchers - desc: Do not use cross-module internal packages. otlp-internal: files: - '!**/exporters/otlp/internal/**/*.go' @@ -190,6 +188,10 @@ linters: - legacy - std-error-handling rules: + - linters: + - revive + path: schema/v.*/types/.* + text: avoid meaningless package names # TODO: Having appropriate comments for exported objects helps development, # even for objects in internal packages. Appropriate comments for all # exported objects should be added and this exclusion removed. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 648e4abab8c..4acc75701b7 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,61 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.37.0/0.59.0/0.13.0] 2025-06-25 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.33.0` package. + The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0.`(#6799) +- The `go.opentelemetry.io/otel/semconv/v1.34.0` package. + The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812) +- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825) +- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839) + +### Changed + +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. 
(#6832) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836) +- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864) +- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898) + +### Fixed + +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710) +- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822) +- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914) + +### Removed + +- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770) + +## [0.12.2] 2025-05-22 + +### Fixed + +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804) + +## [0.12.1] 2025-05-21 + +### Fixes + +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800) + ## [1.36.0/0.58.0/0.12.0] 2025-05-20 ### Added @@ -3288,7 +3343,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 +[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 +[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 [1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 [1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 1902dac057a..f9ddc281fc7 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -109,10 +109,9 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * The qualified approvals need to be from [Approver]s/[Maintainer]s - affiliated with different companies. Two qualified approvals from - [Approver]s or [Maintainer]s affiliated with the same company counts as a - single qualified approval. + * At least one of the qualified approvals needs to be from an + [Approver]/[Maintainer] affiliated with a different company than the author + of the PR. * PRs introducing changes that have already been discussed and consensus reached only need one qualified approval. The discussion and resolution needs to be linked to the PR. @@ -650,11 +649,11 @@ should be canceled. ### Maintainers -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [Tyler Yahn](https://github.com/MrAlias), Splunk +- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) +- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70)) +- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2)) +- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) +- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) ### Emeritus diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 62a56f4d341..4fa423ca02d 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -293,7 +293,7 @@ semconv-generate: $(SEMCONVKIT) --param tag=$(TAG) \ go \ /home/weaver/target - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)" .PHONY: gorelease gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index b6007881212..5fa1b75c60e 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -7,6 +7,7 @@ [![OpenSSF
Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 7c1a9119dcc..1ddcdef0396 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -112,6 +112,29 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. +### Sign the Release Artifact + +To ensure we comply with CNCF best practices, we need to sign the release artifact. +The tarball attached to the GitHub release needs to be signed with your GPG key. + +Follow [these steps] to sign the release artifact and upload it to GitHub. +You can use [this script] to verify the contents of the tarball before signing it. + +Be sure to use the correct GPG key when signing the release artifact. + +```terminal +gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz +``` + +You can verify the signature with: + +```terminal +gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz +``` + +[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases +[this script]: https://github.com/MrAlias/attest-sh + ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index 51fb76b30d0..935bd487631 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. 
-FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python -FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver +FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python +FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md new file mode 100644 index 00000000000..02b56115e3c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md @@ -0,0 +1,4 @@ + +# Migration from v1.33.0 to v1.34.0 + +The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md new file mode 100644 index 00000000000..fab06c97526 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.34.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go new file mode 100644 index 00000000000..5b56662573a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go @@ -0,0 +1,13851 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. It represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely + // identifies the framework API revision offered by a version (`os.version`) of + // the android operating system. More information can be found [here]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system.
More information can be found [here]. +// +// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found [here] + // . + // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [here]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. 
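
Since attribute_group.go only defines keys, typed constructors, and enum values, here is a hedged sketch of how the android.* and app.* conventions above are typically attached to a span; the tracer name, span name, and values are illustrative assumptions, and the OpenTelemetry SDK setup is presumed to exist elsewhere:

```go
package telemetry

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
	"go.opentelemetry.io/otel/trace"
)

// recordAppState starts a span annotated with the android.* and app.*
// semantic-convention attributes defined above.
func recordAppState(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "app.lifecycle",
		trace.WithAttributes(
			semconv.AndroidOSAPILevel("33"),   // android.os.api_level
			semconv.AndroidAppStateForeground, // android.app.state enum value
			semconv.AppInstallationIDKey.String("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
		))
	defer span.End()
}
```
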
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. +func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. 
The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact], which provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity.
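
The artifact.* constructors follow the same KeyValue pattern as the rest of the file; a small sketch that bundles release-provenance attributes for a span or event, reusing example values from the convention definitions above (the package and function names are illustrative):

```go
package telemetry

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

// artifactAttrs collects provenance attributes for a published artifact,
// pairing the artifact with its attestation as the SLSA notes suggest.
func artifactAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ArtifactFilename("golang-binary-amd64-v0.1.0"),
		semconv.ArtifactVersion("v0.1.0"),
		semconv.ArtifactHash("9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"),
		semconv.ArtifactAttestationFilename("golang-binary-amd64-v0.1.0.attestation"),
	}
}
```
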
+// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact], which provides a standard way to identify and locate the packaged +// artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the value + // of the `ConsistentRead` request parameter.
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. 
It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. 
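
Unlike the artifact.* helpers, most aws.dynamodb.* entries are bare attribute.Key values with no typed constructor, so call sites use the Key's typed setters directly. A hedged sketch annotating an existing span for a DynamoDB Query, with values borrowed from the Examples above:

```go
package telemetry

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBQuery records Query-call attributes on a span using the
// keys defined above; the table and index names are placeholders.
func annotateDynamoDBQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
		semconv.AWSDynamoDBIndexNameKey.String("name_to_group"),
		semconv.AWSDynamoDBConsistentReadKey.Bool(true),
		semconv.AWSDynamoDBLimitKey.Int(10),
		semconv.AWSDynamoDBSelectKey.String("ALL_ATTRIBUTES"),
	)
}
```
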
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+	AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+	// AWSKinesisStreamNameKey is the attribute Key conforming to the
+	// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+	// AWS Kinesis [stream] the request refers to. Corresponds to the
+	// `--stream-name` parameter of the Kinesis [describe-stream] operation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "some-stream-name"
+	//
+	// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+	// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+	AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+	// ARN as provided on the `Context` passed to the function (the
+	// `Lambda-Runtime-Invoked-Function-Arn` header on the
+	// `/runtime/invocation/next` endpoint, if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+	// Note: This may be different from `cloud.resource_id` if an alias is involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+	// AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+	// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+	// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+	// function. Its contents are read by Lambda and used to trigger a function.
+	// This isn't available in the lambda execution context or the lambda runtime
+	// environment. This is going to be populated by the AWS SDK for each language
+	// when that UUID is present. Some of these operations are
+	// Create/Delete/Get/List/Update EventSourceMapping.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+	//
+	// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+	AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+	// Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+	// Note: See the [log group ARN format documentation].
+	//
+	// [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+	// AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/aws/lambda/my-function", "opentelemetry-service"
+	// Note: Multiple log groups must be supported for cases like multi-container
+	// applications, where a single application has sidecar containers, and each
+	// writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+	// AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+	// Note: See the [log stream ARN format documentation]. One log group can
+	// contain several log streams, so these ARNs necessarily identify both a log
+	// group and a log stream.
+	//
+	// [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+	// AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+	// semantic conventions. It represents the AWS request ID as returned in the
+	// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+	// `x-amz-request-id`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
+	AWSRequestIDKey = attribute.Key("aws.request_id")
+
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request refers to.
+	// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "some-bucket-name"
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	//
+	// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source object
+	// (in the form `bucket`/`key`) for the copy operation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "someFile.yml"
+	// Note: The `copy_source` attribute applies to S3 copy operations and
+	// corresponds to the `--copy-source` parameter
+	// of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+	// This applies in particular to the following operations:
+	//
+	// - [abort-multipart-upload]
+	// - [complete-multipart-upload]
+	// - [list-parts]
+	// - [upload-part]
+	// - [upload-part-copy]
+	//
+	//
+	// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+	// [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+	// [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+	// [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+	// [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+	// [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+	// AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+	// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+	// of the Secret stored in the Secrets Manager.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+	AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+	// AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+	// semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+	// SNS [topic] is a logical access point that acts as a communication channel.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+	//
+	// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+	AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+	// AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+	// semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+	// unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+	// used to access the queue and perform actions on it.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+	AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+	// AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+	// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+	// of the AWS Step Functions Activity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+	AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+	// AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+	// "aws.step_functions.state_machine.arn" semantic conventions. It represents
+	// the ARN of the AWS Step Functions State Machine.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+	AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions.
It represents the unique +// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
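+//
+// For example, assuming a trace.Span named span is in scope and updatesJSON
+// holds the serialized update items (both names are illustrative):
+//
+//	span.SetAttributes(AWSDynamoDBGlobalSecondaryIndexUpdates(updatesJSON...))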
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
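+//
+// For example, assuming a trace.Span named span is in scope (the value is
+// illustrative):
+//
+//	span.SetAttributes(AWSDynamoDBScannedCount(50))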
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. +// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. 
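+//
+// For example, deriving the ID from an assumed taskARN string before
+// recording it (sketch only; span is an assumed trace.Span):
+//
+//	id := taskARN[strings.LastIndex(taskARN, "/")+1:]
+//	span.SetAttributes(AWSECSTaskID(id))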
+func AWSECSTaskID(val string) attribute.KeyValue {
+	return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+	return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+	return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// endpoint, if applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+// function. Its contents are read by Lambda and used to trigger a function.
+// This isn't available in the lambda execution context or the lambda runtime
+// environment. This is going to be populated by the AWS SDK for each language
+// when that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+	return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
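+//
+// For example, collecting an application log group together with an assumed
+// sidecar's log group (names are illustrative):
+//
+//	attrs := []attribute.KeyValue{
+//		AWSLogGroupNames("/aws/lambda/my-function", "/aws/lambda/my-sidecar"),
+//	}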
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+	return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+	return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+	return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+	return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+	return AWSS3UploadIDKey.String(val)
+}
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the Secret stored in the Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+	return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions.
It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. +// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // ec2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: az +const ( + // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic + // conventions. It represents the [Azure Resource Provider Namespace] as + // recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzNamespaceKey = attribute.Key("az.namespace") + + // AzServiceRequestIDKey is the attribute Key conforming to the + // "az.service_request_id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzServiceRequestIDKey = attribute.Key("az.service_request_id") +) + +// AzNamespace returns an attribute KeyValue conforming to the "az.namespace" +// semantic conventions. It represents the [Azure Resource Provider Namespace] as +// recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzNamespace(val string) attribute.KeyValue { + return AzNamespaceKey.String(val) +} + +// AzServiceRequestID returns an attribute KeyValue conforming to the +// "az.service_request_id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. 
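+//
+// For example, copying the ID from an assumed *http.Response named resp (the
+// x-ms-request-id header name is illustrative of common Azure services; span
+// is an assumed trace.Span):
+//
+//	span.SetAttributes(AzServiceRequestID(resp.Header.Get("x-ms-request-id")))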
+func AzServiceRequestID(val string) attribute.KeyValue { + return AzServiceRequestIDKey.String(val) +} + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // bounded_staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // consistent_prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. 
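+//
+// For example, assuming isMobile was parsed from a forwarded
+// Sec-CH-UA-Mobile client hint and span is an assumed trace.Span:
+//
+//	span.SetAttributes(BrowserMobile(isMobile))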
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
+// Namespace: cassandra
+const (
+	// CassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "cassandra.consistency.level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from [CQL].
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+	CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+	// CassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "cassandra.coordinator.dc" semantic conventions. It represents the data
+	// center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: us-west-2
+	CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+	// CassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+	// coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+	CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+	// CassandraPageSizeKey is the attribute Key conforming to the
+	// "cassandra.page.size" semantic conventions. It represents the fetch size used
+	// for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 5000
+	CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+	// CassandraQueryIdempotentKey is the attribute Key conforming to the
+	// "cassandra.query.idempotent" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+	// CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+	// "cassandra.speculative_execution.count" semantic conventions. It represents
+	// the number of times a query was speculatively executed. Not set or `0` if the
+	// query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 2
+	CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+	return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
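+//
+// For example, assuming the driver exposed the coordinator's host ID as
+// coordinatorID (an illustrative string variable; span is an assumed
+// trace.Span):
+//
+//	span.SetAttributes(CassandraCoordinatorID(coordinatorID))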
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+	return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+	return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+	return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+	// all
+	// Stability: development
+	CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	// Stability: development
+	CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	// Stability: development
+	CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	// Stability: development
+	CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	// Stability: development
+	CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+	// two
+	// Stability: development
+	CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+	// three
+	// Stability: development
+	CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+	// local_one
+	// Stability: development
+	CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+	// any
+	// Stability: development
+	CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+	// serial
+	// Stability: development
+	CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	// Stability: development
+	CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Namespace: cicd
+const (
+	// CICDPipelineActionNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+	// action a pipeline run is performing.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "BUILD", "RUN", "SYNC"
+	CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+	// CICDPipelineNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.name" semantic conventions. It represents the human readable
+	// name of the pipeline within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Build and Test", "Lint", "Deploy Go Project",
+	// "deploy_to_environment"
+	CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+	// CICDPipelineResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.result" semantic conventions.
It represents the result of a
+	// pipeline run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+	// CICDPipelineRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+	// identifier of a pipeline run within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "120912"
+	CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+	// CICDPipelineRunStateKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.state" semantic conventions. It represents the state of
+	// the pipeline run; a pipeline run goes through these states during its
+	// lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pending", "executing", "finalizing"
+	CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+	// CICDPipelineRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+	// the pipeline run, providing the complete address in order to locate and
+	// identify the pipeline run.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+	//
+	// [URL]: https://wikipedia.org/wiki/URL
+	CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+	// CICDPipelineTaskNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.name" semantic conventions. It represents the human
+	// readable name of a task within a pipeline. Task here most closely aligns with
+	// a [computing process] in a pipeline. Other terms for tasks include commands,
+	// steps, and procedures.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+	//
+	// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+	CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+	// CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+	// identifier of a task run within a pipeline.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "12097"
+	CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+	// CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.result" semantic conventions. It represents the
+	// result of a task run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+	// CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+	// [URL] of the pipeline task run, providing the complete address in order to
+	// locate and identify the pipeline task run.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. +func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. 
due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. + // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, eg. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, eg. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (eg. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (eg. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (eg. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, eg. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, eg. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, eg. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system. 
It is available to the
+	// CICD system to perform work on (online / idle).
+	// Stability: development
+	CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+	// The worker is performing work for the CICD system.
+	// Stability: development
+	CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+	// The worker is not available to the CICD system (disconnected / down).
+	// Stability: development
+	CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
+// Namespace: client
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix domain
+	// socket name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.address` SHOULD represent the client address behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port" semantic
+	// conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.port` SHOULD represent the client port behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
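+// Illustrative usage (not part of the generated conventions): a sketch of
+// recording the peer address and port on a server-side span, assuming `span` is
+// an active trace.Span and the values come from the accepted connection:
+//
+//	span.SetAttributes(
+//		ClientAddress("10.1.2.80"),
+//		ClientPort(65123),
+//	)
+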
+// Namespace: cloud
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+	// semantic conventions. It represents the cloud account ID the resource is
+	// assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "111111111111", "opentelemetry"
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-east-1c"
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+	// conventions. It represents the geographical region within a cloud provider.
+	// When associated with a resource, this attribute specifies the region where
+	// the resource operates. When calling services or APIs deployed on a cloud,
+	// this attribute identifies the region where the called destination is
+	// deployed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-central1", "us-east-1"
+	// Note: Refer to your provider's docs to see the available regions, for example
+	// [Alibaba Cloud regions], [AWS regions], [Azure regions],
+	// [Google Cloud regions], or [Tencent Cloud regions].
+	//
+	// [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+	// [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+	// [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+	// [Google Cloud regions]: https://cloud.google.com/about/locations
+	// [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+	// semantic conventions. It represents the cloud provider-specific native
+	// identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+	// [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+	// "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+	// "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+	// Note: On some cloud providers, it may not be possible to determine the full
+	// ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud provider.
+	// The following well-known definitions MUST be used if you set this attribute
+	// and they apply:
+	//
+	// - **AWS Lambda:** The function [ARN].
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias suffix]
+	// with the resolved function version, as the same runtime instance may be
+	// invocable with
+	// multiple different aliases.
+	// - **GCP:** The [URI of the resource]
+	// - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	// This means that a span attribute MUST be used, as an Azure function app
+	// can host multiple functions that would usually share
+	// a TracerProvider.
+	//
+	// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+	// [full resource name]: https://google.aip.dev/122#full-resource-names
+	// [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+	// [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+	// [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
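+// Illustrative usage (not part of the generated conventions): cloud attributes
+// usually describe the environment a service runs in, so they are typically set
+// once on an OpenTelemetry resource rather than per span. A minimal sketch,
+// assuming go.opentelemetry.io/otel/sdk/resource and hypothetical values:
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(
+//			CloudProviderAWS,
+//			CloudPlatformAWSEC2,
+//			CloudRegion("us-east-1"),
+//			CloudAvailabilityZone("us-east-1c"),
+//			CloudAccountID("111111111111"),
+//		))
+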
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// Enum values for cloud.platform
+var (
+	// Alibaba Cloud Elastic Compute Service
+	// Stability: development
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	// Stability: development
+	CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	// Stability: development
+	CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	// Stability: development
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	// Stability: development
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	// Stability: development
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	// Stability: development
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	// Stability: development
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	// Stability: development
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	// Stability: development
+	CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	// Stability: development
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Apps
+	// Stability: development
+	CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+	// Azure Container Instances
+	// Stability: development
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	// Stability: development
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	// Stability: development
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	// Stability: development
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	// Stability: development
+	CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift")
+	// Google Bare Metal Solution (BMS)
+	// Stability: development
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	// Stability: development
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	// Stability: development
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	// Stability: development
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	// Stability: development
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	// Stability: development
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	// Stability: 
development + CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + // Stability: development + CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") + // Compute on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") + // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") + // Tencent Cloud Cloud Virtual Machine (CVM) + // Stability: development + CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + // Stability: development + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + // Stability: development + CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Enum values for cloud.provider +var ( + // Alibaba Cloud + // Stability: development + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + // Stability: development + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + // Stability: development + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + // Stability: development + CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") + // Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") + // Tencent Cloud + // Stability: development + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// Namespace: cloudevents +const ( + // CloudEventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the [event_id] + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" + // + // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id + CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudEventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the [source] + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", + // "my-service" + // + // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 + CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudEventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents specification] which the event uses. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + // + // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion + CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudEventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the [subject] + // of the event in the context of the event producer (identified by source). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: mynewfile.jpg + // + // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject + CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudEventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the [event_type] + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" + // + // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type + CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudEventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the [event_id] +// uniquely identifies the event. +// +// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id +func CloudEventsEventID(val string) attribute.KeyValue { + return CloudEventsEventIDKey.String(val) +} + +// CloudEventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the [source] +// identifies the context in which an event happened. +// +// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 +func CloudEventsEventSource(val string) attribute.KeyValue { + return CloudEventsEventSourceKey.String(val) +} + +// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the +// "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents specification] which the event uses. +// +// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion +func CloudEventsEventSpecVersion(val string) attribute.KeyValue { + return CloudEventsEventSpecVersionKey.String(val) +} + +// CloudEventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the [subject] +// of the event in the context of the event producer (identified by source). +// +// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject +func CloudEventsEventSubject(val string) attribute.KeyValue { + return CloudEventsEventSubjectKey.String(val) +} + +// CloudEventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the [event_type] +// contains a value describing the type of event related to the originating +// occurrence. 
+// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. 
+ // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. 
This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+	return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
+
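+// Illustrative usage (not part of the generated conventions): a sketch of
+// recording code location attributes on a span, assuming `span` is an active
+// trace.Span; the file, function, and line values are hypothetical:
+//
+//	span.SetAttributes(
+//		CodeFilePath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeFunctionName("com.example.MyHttpService.serveRequest"),
+//		CodeLineNumber(42),
+//	)
+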
+// Namespace: container
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used to
+	// run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otelcontribcol"
+	// Note: If using embedded credentials or sensitive data, it is recommended to
+	// remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otelcontribcol", "--config", "config.yaml"
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full command
+	// run by the container as a single string.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otelcontribcol --config config.yaml"
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCSIPluginNameKey is the attribute Key conforming to the
+	// "container.csi.plugin.name" semantic conventions. It represents the name of
+	// the CSI ([Container Storage Interface]) plugin used by the volume.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pd.csi.storage.gke.io"
+	// Note: This can sometimes be referred to as a "driver" in CSI implementations.
+	// This should represent the `name` field of the GetPluginInfo RPC.
+	//
+	// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+	ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+	// ContainerCSIVolumeIDKey is the attribute Key conforming to the
+	// "container.csi.volume.id" semantic conventions. It represents the unique
+	// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+	// Note: This can sometimes be referred to as a "volume handle" in CSI
+	// implementations. This should represent the `Volume.volume_id` field in CSI
+	// spec.
+	//
+	// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+	ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+	// conventions. It represents the container ID. Usually a UUID, as for example
+	// used to [identify Docker containers]. The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "a3bf90e006b2"
+	//
+	// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime specific
+	// image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect [API]
+	// endpoint.
+	// K8s defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	//
+	// [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of the
+	// image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "gcr.io/opentelemetry/operator"
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the repo
+	// digests of the container image as provided by the container runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+	// "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+	// Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+	//
+	// [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+	// [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image Inspect]. Should be only
+	// the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "v1.27.1", "3.5.7-0"
+	//
+	// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry-autoconf"
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container runtime
+	// managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "docker", "containerd", "rkt"
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
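+// Illustrative usage (not part of the generated conventions): container
+// attributes typically identify the runtime environment and are set once on a
+// resource. A minimal sketch, assuming go.opentelemetry.io/otel/sdk/resource
+// and hypothetical values:
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(
+//			ContainerID("a3bf90e006b2"),
+//			ContainerName("opentelemetry-autoconf"),
+//			ContainerImageName("gcr.io/opentelemetry/operator"),
+//			ContainerImageTags("v1.27.1"),
+//		))
+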
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container runtime
+// managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
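+ //
+ // Illustrative usage sketch (editor's addition, not generated content;
+ // attribute.NewSet is from go.opentelemetry.io/otel/attribute, and
+ // CPULogicalNumber and CPUModeUser are defined later in this file):
+ //
+ //	set := attribute.NewSet(CPULogicalNumber(1), CPUModeUser)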
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. +func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // user + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // system + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // iowait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. 
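+ //
+ // Illustrative usage sketch (editor's addition, not generated content; span
+ // is assumed to be a trace.Span recording a database client operation, and
+ // the values reuse the documented examples):
+ //
+ //	span.SetAttributes(
+ //		DBCollectionName("public.users"),
+ //		DBOperationName("SELECT"),
+ //	)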
+ DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. 
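+ //
+ // Illustrative usage sketch (editor's addition, not generated content):
+ // pairing the full query text with its low-cardinality summary, using the
+ // documented example values:
+ //
+ //	span.SetAttributes(
+ //		DBQueryText("SELECT * FROM wuser_table where username = ?"),
+ //		DBQuerySummary("SELECT wuser_table"),
+ //	)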
+ // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. + // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. 
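+ //
+ // Illustrative usage sketch (editor's addition, not generated content; span
+ // is assumed to be a trace.Span, and DBSystemNamePostgreSQL is one of the
+ // enum values defined later in this file):
+ //
+ //	span.SetAttributes(
+ //		DBSystemNamePostgreSQL,
+ //		DBNamespace("customers"),
+ //	)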
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. 
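+//
+// Illustrative usage sketch (editor's addition, not generated content; span
+// is assumed to be a trace.Span for a failed database call, using a
+// documented example status code):
+//
+//	span.SetAttributes(DBResponseStatusCode("08P01"))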
+func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. + // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = 
DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // 
[SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
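+//
+// Illustrative usage sketch (editor's addition, not generated content;
+// resource is go.opentelemetry.io/otel/sdk/resource, and SchemaURL is
+// assumed to be this package's schema URL constant):
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		DeploymentEnvironmentName("production"),
+//		DeploymentName("deploy-frontend"),
+//	)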
+func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the source side, and when communicating through an + // intermediary, `destination.address` SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the "destination.port" + // semantic conventions. It represents the destination port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Namespace: device +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123456789012345", "01:23:45:67:89:AB" + // Note: Its value SHOULD be identical for all apps on a device and it SHOULD + // NOT change if an app is uninstalled and re-installed. + // However, it might be resettable by the user for all apps on a device. + // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be + // used as values. + // + // More information about Android identifier best practices can be found [here] + // . + // + // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution + // > should be taken when storing personal data or anything which can identify a + // > user. GDPR and data protection laws may apply, + // > ensure you do your own due diligence.> Due to these reasons, this + // > identifier is not recommended for consumer applications and will likely + // > result in rejection from both Google Play and App Store. + // > However, it may be appropriate for specific enterprise scenarios, such as + // > kiosk devices or enterprise-managed devices, with appropriate compliance + // > clearance. 
+ // > Any instrumentation providing this identifier MUST implement it as an + // > opt-in feature.> See [`app.installation.id`]> for a more + // > privacy-preserving alternative. + // + // [here]: https://developer.android.com/training/articles/user-data-ids + // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of the + // device manufacturer. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. 
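+ //
+ // Illustrative usage sketch (editor's addition, not generated content;
+ // ioCounter is assumed to be a metric.Int64Counter from
+ // go.opentelemetry.io/otel/metric):
+ //
+ //	ioCounter.Add(ctx, 4096, metric.WithAttributes(DiskIODirectionRead))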
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions.
It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? 
*
+ //
+ // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name of
+ // the source on which the triggering operation was performed. For example, in
+ // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+ // database name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myBucketName", "myDbName"
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3 is
+ // the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myFile.txt", "myTableName"
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string containing
+ // the time when the data was accessed in the [ISO 8601] format expressed in
+ // [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a string,
+ // that will be potentially reused for other invocations to the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+ // Note: - **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation ID of
+ // the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+ // semantic conventions. It represents the name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: my-function
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
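+ //
+ // Illustrative usage sketch (editor's addition, not generated content; span
+ // is assumed to be the client span of an outgoing invocation, and the
+ // values reuse the documented examples):
+ //
+ //	span.SetAttributes(
+ //		FaaSInvokedName("my-function"),
+ //		FaaSInvokedProviderAWS,
+ //		FaaSInvokedRegion("eu-central-1"),
+ //	)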
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud region of
+ // the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: eu-central-1
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+ // semantic conventions. It represents the amount of memory available to the
+ // serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information (which must be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this runtime
+ // instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-function", "myazurefunctionapp/some-function-name"
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function.name`]
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ //
+ // - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ //
+ //
+ // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation time
+ // in the [ISO 8601] format expressed in [UTC].
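+ //
+ // Illustrative usage sketch (editor's addition, not generated content):
+ // formatting a UTC timestamp with time.RFC3339 yields the documented ISO
+ // 8601 form, e.g. "2020-01-23T13:47:06Z":
+ //
+ //	span.SetAttributes(FaaSTime(time.Now().UTC().Format(time.RFC3339)))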
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. 
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. 
+	// Stability: development
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+	// Alibaba Cloud
+	// Stability: development
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	// Stability: development
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	// Stability: development
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	// Stability: development
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	// Stability: development
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+	// A response to some data source operation such as a database or filesystem
+	// read/write
+	// Stability: development
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	// Stability: development
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	// Stability: development
+	FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	// Stability: development
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	// Stability: development
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Namespace: feature_flag
+const (
+	// FeatureFlagContextIDKey is the attribute Key conforming to the
+	// "feature_flag.context.id" semantic conventions. It represents the unique
+	// identifier for the flag evaluation context. For example, the targeting key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+	FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+	// FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+	// semantic conventions. It represents the lookup key of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "logo-color"
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider.name" semantic conventions. It identifies the
+	// feature flag provider.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Flag Manager"
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+	// FeatureFlagResultReasonKey is the attribute Key conforming to the
+	// "feature_flag.result.reason" semantic conventions. It represents the reason
+	// code which shows how a feature flag value was determined.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "static", "targeting_match", "error", "default"
+	FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+	// FeatureFlagResultValueKey is the attribute Key conforming to the
+	// "feature_flag.result.value" semantic conventions. It represents the evaluated
+	// value of the feature flag.
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "#ff0000", true, 3
+	// Note: With some feature flag providers, feature flag results can be quite
+	// large or contain private or sensitive details.
+	// Because of this, `feature_flag.result.variant` is often the preferred
+	// attribute if it is available.
+	//
+	// It may be desirable to redact or otherwise limit the size and scope of
+	// `feature_flag.result.value` if possible.
+	// Because the evaluated flag value is unstructured and may be any type, it is
+	// left to the instrumentation author to determine how best to achieve this.
+	FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+	// FeatureFlagResultVariantKey is the attribute Key conforming to the
+	// "feature_flag.result.variant" semantic conventions. It represents a semantic
+	// identifier for an evaluated flag value.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "red", "true", "on"
+	// Note: A semantic identifier, commonly referred to as a variant, provides a
+	// means for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+	// FeatureFlagSetIDKey is the attribute Key conforming to the
+	// "feature_flag.set.id" semantic conventions. It represents the identifier of
+	// the [flag set] to which the feature flag belongs.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "proj-1", "ab98sgs", "service1/dev"
+	//
+	// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+	FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+	// FeatureFlagVersionKey is the attribute Key conforming to the
+	// "feature_flag.version" semantic conventions. It represents the version of the
+	// ruleset used during the evaluation. This may be any stable value which
+	// uniquely identifies the ruleset.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "1", "01ABCDEF"
+	FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+	return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It identifies the
+// feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
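+//
+// A minimal usage sketch, pairing the variant with its flag key (values are
+// illustrative only):
+//
+//	attrs := []attribute.KeyValue{
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagResultVariant("red"),
+//	}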
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+	return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+	return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+	return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+	// The resolved value is static (no dynamic evaluation).
+	// Stability: development
+	FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+	// The resolved value fell back to a pre-configured value (no dynamic evaluation
+	// occurred or dynamic evaluation yielded no result).
+	// Stability: development
+	FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+	// The resolved value was the result of a dynamic evaluation, such as a rule or
+	// specific user-targeting.
+	// Stability: development
+	FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+	// The resolved value was the result of pseudorandom assignment.
+	// Stability: development
+	FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+	// The resolved value was retrieved from cache.
+	// Stability: development
+	FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+	// The resolved value was the result of the flag being disabled in the
+	// management system.
+	// Stability: development
+	FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+	// The reason for the resolved value could not be determined.
+	// Stability: development
+	FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+	// The resolved value is non-authoritative or possibly out of date.
+	// Stability: development
+	FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+	// The resolved value was the result of an error.
+	// Stability: development
+	FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
+
+// Namespace: file
+const (
+	// FileAccessedKey is the attribute Key conforming to the "file.accessed"
+	// semantic conventions. It represents the time when the file was last accessed,
+	// in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: This attribute might not be supported by some file systems — NFS,
+	// FAT32, in embedded OS, etc.
+	FileAccessedKey = attribute.Key("file.accessed")
+
+	// FileAttributesKey is the attribute Key conforming to the "file.attributes"
+	// semantic conventions. It represents the array of file attributes.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "readonly", "hidden"
+	// Note: Attribute names depend on the OS or file system. Here’s a
+	// non-exhaustive list of values expected for this attribute: `archive`,
+	// `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+	// `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+	// `write`.
+	FileAttributesKey = attribute.Key("file.attributes")
+
+	// FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+	// conventions. It represents the time when the file attributes or metadata was
+	// last changed, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: `file.changed` captures the time when any of the file's properties or
+	// attributes (including the content) are changed, while `file.modified`
+	// captures the timestamp when the file content is modified.
+	FileChangedKey = attribute.Key("file.changed")
+
+	// FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+	// conventions. It represents the time when the file was created, in ISO 8601
+	// format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: This attribute might not be supported by some file systems — NFS,
+	// FAT32, in embedded OS, etc.
+	FileCreatedKey = attribute.Key("file.created")
+
+	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
+	// semantic conventions. It represents the directory where the file is located.
+	// It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/home/user", "C:\Program Files\MyApp"
+	FileDirectoryKey = attribute.Key("file.directory")
+
+	// FileExtensionKey is the attribute Key conforming to the "file.extension"
+	// semantic conventions. It represents the file extension, excluding the leading
+	// dot.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "png", "gz"
+	// Note: When the file name has multiple extensions (example.tar.gz), only the
+	// last one should be captured ("gz", not "tar.gz").
+	FileExtensionKey = attribute.Key("file.extension")
+
+	// FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+	// semantic conventions. It represents the name of the fork. A fork is
+	// additional data associated with a filesystem object.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Zone.Identifier"
+	// Note: On Linux, a resource fork is used to store additional data with a
+	// filesystem object. A file always has at least one fork for the data portion,
+	// and additional forks may exist.
+	// On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+	// data stream for a file is just called $DATA. Zone.Identifier is commonly used
+	// by Windows to track contents downloaded from the Internet. An ADS is
+	// typically of the form: C:\path\to\filename.extension:some_fork_name, and
+	// some_fork_name is the value that should populate `fork_name`.
+	// `filename.extension` should populate `file.name`, and `extension` should
+	// populate `file.extension`. The full path, `file.path`, will include the fork
+	// name.
+	FileForkNameKey = attribute.Key("file.fork_name")
+
+	// FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+	// semantic conventions. 
It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. +func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. +func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. 
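+//
+// A minimal usage sketch deriving the value from os.Stat (error handling
+// omitted; the os and time imports are assumed, not part of this package):
+//
+//	fi, _ := os.Stat("/home/alice/example.png")
+//	attr := FileModified(fi.ModTime().UTC().Format(time.RFC3339))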
+func FileModified(val string) attribute.KeyValue {
+	return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+	return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+	return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+	return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+	return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+	return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+	return FileSymbolicLinkTargetPathKey.String(val)
+}
+
+// Namespace: gcp
+const (
+	// GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+	// "gcp.apphub.application.container" semantic conventions. It represents the
+	// container within GCP where the AppHub application is defined.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "projects/my-container-project"
+	GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+	// GCPAppHubApplicationIDKey is the attribute Key conforming to the
+	// "gcp.apphub.application.id" semantic conventions. It represents the name of
+	// the application as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-application"
+	GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+	// GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+	// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+	// zone or region where the application is defined.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-central1"
+	GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+	// GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+	// criticality of a service, which indicates its importance to the business.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub type enum]
+	//
+	// [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+	GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+	// GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.service.environment_type" semantic conventions. It represents the
+	// environment of a service, which is the stage of a software lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub environment type]
+	//
+	// [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+	GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+	// GCPAppHubServiceIDKey is the attribute Key conforming to the
+	// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+	// service as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-service"
+	GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+	// GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+	// the criticality of a workload, which indicates its importance to the business.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub type enum]
+	//
+	// [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+	GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+	// GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.environment_type" semantic conventions. It represents
+	// the environment of a workload, which is the stage of a software lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub environment type]
+	//
+	// [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+	GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+	// GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+	// workload as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-workload"
+	GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+	// GCPClientServiceKey is the attribute Key conforming to the
+	// "gcp.client.service" semantic conventions. It identifies the
+	// Google Cloud service for which the official client library is intended.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+	// Note: Intended to be a stable identifier for Google Cloud client libraries
+	// that is uniform across implementation languages. The value should be derived
+	// from the canonical service domain for the service; for example,
+	// 'foo.googleapis.com' should result in a value of 'foo'.
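+	//
+	// For example, following the derivation rule above, a client library for
+	// 'spanner.googleapis.com' would report (sketch only):
+	//
+	//	GCPClientService("spanner")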
+	GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+	// the Cloud Run [execution] being run for the Job, as set by the
+	// [`CLOUD_RUN_EXECUTION`] environment variable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "job-name-xxxx", "sample-job-mdw84"
+	//
+	// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+	// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+	// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+	// environment variable.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 1
+	//
+	// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+	// GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+	// of a GCE instance. This is the full value of the default or
+	// [custom hostname].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-host1234.example.com",
+	// "sample-vm.us-west1-b.c.my-project.internal"
+	//
+	// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+	GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+	// GCPGCEInstanceNameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+	// of a GCE instance. This is the value provided by `host.name`, the visible
+	// name of the instance in the Cloud Console UI, and the prefix for the default
+	// hostname of the instance as defined by the [default internal DNS name].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "instance-1", "my-vm-name"
+	//
+	// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+	GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+	return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+	return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
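+//
+// A minimal usage sketch combining the AppHub application attributes (values
+// are taken from the examples above and are illustrative only):
+//
+//	attrs := []attribute.KeyValue{
+//		GCPAppHubApplicationContainer("projects/my-container-project"),
+//		GCPAppHubApplicationID("my-application"),
+//		GCPAppHubApplicationLocation("us-central1"),
+//	}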
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+	return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+	return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+	return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It identifies the Google Cloud
+// service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+	return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+	return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+	return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+	return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+	return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+	// Mission critical service.
+	// Stability: development
+	GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+	// High impact.
+	// Stability: development
+	GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+	// Medium impact.
+ // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Math Tutor", "Fiction Writer"
+	GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+	// GenAIConversationIDKey is the attribute Key conforming to the
+	// "gen_ai.conversation.id" semantic conventions. It represents the unique
+	// identifier for a conversation (session, thread), used to store and correlate
+	// messages within this conversation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+	GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+	// GenAIDataSourceIDKey is the attribute Key conforming to the
+	// "gen_ai.data_source.id" semantic conventions. It represents the data source
+	// identifier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "H7STPQYOND"
+	// Note: Data sources are used by AI agents and RAG applications to store
+	// grounding data. A data source may be an external database, object store,
+	// document collection, website, or any other storage system used by the GenAI
+	// agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+	// used by the GenAI system rather than a name specific to the external storage,
+	// such as a database or object store. Semantic conventions referencing
+	// `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+	// `db.*`, to further identify and describe the data source.
+	GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+	// GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
+	// "gen_ai.openai.request.service_tier" semantic conventions. It represents the
+	// service tier requested. May be a specific tier, default, or auto.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "auto", "default"
+	GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
+
+	// GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
+	// "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+	// service tier used for the response.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "scale", "default"
+	GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
+
+	// GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
+	// the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+	// represents a fingerprint to track any eventual change in the Generative AI
+	// environment.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "fp_44709d6fcb"
+	GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
+
+	// GenAIOperationNameKey is the attribute Key conforming to the
+	// "gen_ai.operation.name" semantic conventions. It represents the name of the
+	// operation being performed.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: If one of the predefined values applies, but a specific system uses a
+	// different name, it's RECOMMENDED to document it in the semantic conventions
+	// for that GenAI system and to use the system-specific name in the
+	// instrumentation. If a different name is not documented, instrumentation
+	// libraries SHOULD use the applicable predefined value.
+	GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+	// GenAIOutputTypeKey is the attribute Key conforming to the
+	// "gen_ai.output.type" semantic conventions. It represents the content type
+	// requested by the client.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute SHOULD be used when the client requests output of a
+	// specific type. The model may return zero or more outputs of this type.
+	// This attribute specifies the output modality and not the actual output
+	// format. For example, if an image is requested, the actual output could be a
+	// URL pointing to an image file.
+	// Additional output format details may be recorded in the future in the
+	// `gen_ai.output.{type}.*` attributes.
+	GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+	// GenAIRequestChoiceCountKey is the attribute Key conforming to the
+	// "gen_ai.request.choice.count" semantic conventions. It represents the target
+	// number of candidate completions to return.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 3
+	GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+	// GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+	// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+	// encoding formats requested in an embeddings operation, if specified.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ["base64"], ["float", "binary"]
+	// Note: In some GenAI systems the encoding formats are called embedding types.
+	// Also, some GenAI systems only accept a single format per request.
+	GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+	// GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+	// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+	// frequency penalty setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.1
+	GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+	// GenAIRequestMaxTokensKey is the attribute Key conforming to the
+	// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+	// number of tokens the model generates for a request.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 100
+	GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+	// GenAIRequestModelKey is the attribute Key conforming to the
+	// "gen_ai.request.model" semantic conventions. It represents the name of the
+	// GenAI model a request is being made to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: gpt-4
+	GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+	// GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+	// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+	// presence penalty setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.1
+	GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+	// GenAIRequestSeedKey is the attribute Key conforming to the
+	// "gen_ai.request.seed" semantic conventions. It represents the request seed;
+	// requests with the same seed value are more likely to return the same
+	// result.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 100
+	GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+	// GenAIRequestStopSequencesKey is the attribute Key conforming to the
+	// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+	// of sequences that the model will use to stop generating further tokens.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "forest", "lived"
+	GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+	// GenAIRequestTemperatureKey is the attribute Key conforming to the
+	// "gen_ai.request.temperature" semantic conventions. It represents the
+	// temperature setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.0
+	GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+	// GenAIRequestTopKKey is the attribute Key conforming to the
+	// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+	// setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0
+	GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+	// GenAIRequestTopPKey is the attribute Key conforming to the
+	// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+	// setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0
+	GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+	// GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+	// "gen_ai.response.finish_reasons" semantic conventions. It represents the
+	// array of reasons the model stopped generating tokens, corresponding to each
+	// generation received.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ["stop"], ["stop", "length"]
+	GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+	// GenAIResponseIDKey is the attribute Key conforming to the
+	// "gen_ai.response.id" semantic conventions. It represents the unique
+	// identifier for the completion.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "chatcmpl-123"
+	GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+	// GenAIResponseModelKey is the attribute Key conforming to the
+	// "gen_ai.response.model" semantic conventions. It represents the name of the
+	// model that generated the response.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "gpt-4-0613"
+	GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+	// GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
+	// semantic conventions. It represents the Generative AI product as identified
+	// by the client or server instrumentation.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: openai
+	// Note: The `gen_ai.system` describes a family of GenAI models, with the
+	// specific model identified by the `gen_ai.request.model` and
+	// `gen_ai.response.model` attributes.
+	//
+	// The actual GenAI product may differ from the one identified by the client.
+	// Multiple systems, including Azure OpenAI and Gemini, are accessible by
+	// OpenAI client libraries. In such cases, the `gen_ai.system` is set to
+	// `openai` based on the instrumentation's best knowledge, instead of the
+	// actual system. The `server.address` attribute may help identify the actual
+	// system in use for `openai`.
+	//
+	// For a custom model, a custom friendly name SHOULD be used.
+	// If none of these options apply, the `gen_ai.system` SHOULD be set to
+	// `_OTHER`.
+	GenAISystemKey = attribute.Key("gen_ai.system")
+
+	// GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+	// semantic conventions. It represents the type of token being counted.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "input", "output"
+	GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+	// GenAIToolCallIDKey is the attribute Key conforming to the
+	// "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+	// identifier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+	GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+	// GenAIToolDescriptionKey is the attribute Key conforming to the
+	// "gen_ai.tool.description" semantic conventions. It represents the tool
+	// description.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Multiply two numbers"
+	GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+	// GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+	// semantic conventions. It represents the name of the tool utilized by the
+	// agent.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Flights"
+	GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+	// GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+	// semantic conventions. It represents the type of the tool utilized by the
+	// agent.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "function", "extension", "datastore"
+	// Note: Extension: A tool executed on the agent-side to directly call external
+	// APIs, bridging the gap between the agent and real-world systems.
+	// Agent-side operations involve actions that are performed by the agent on the
+	// server or within the agent's controlled environment.
+	// Function: A tool executed on the client-side, where the agent generates
+	// parameters for a predefined function, and the client executes the logic.
+	// Client-side operations are actions taken on the user's end or within the
+	// client application.
+	// Datastore: A tool used by the agent to access and query structured or
+	// unstructured external data for retrieval-augmented tasks or knowledge
+	// updates.
+	GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+	// GenAIUsageInputTokensKey is the attribute Key conforming to the
+	// "gen_ai.usage.input_tokens" semantic conventions. 
It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "gen_ai.openai.response.service_tier" semantic conventions. It represents the +// service tier used for the response. +func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { + return GenAIOpenAIResponseServiceTierKey.String(val) +} + +// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming +// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It +// represents a fingerprint to track any eventual change in the Generative AI +// environment. +func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return GenAIOpenAIResponseSystemFingerprintKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. 
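+//
+// A minimal usage sketch; the variadic arguments map onto the string[] type
+// (values are illustrative only):
+//
+//	attr := GenAIRequestEncodingFormats("float", "binary")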
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+	return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+	return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+	return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+	return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+	return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the request seed;
+// requests with the same seed value are more likely to return the same
+// result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+	return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+	return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+	return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+	return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+	return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+	return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
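+//
+// A minimal usage sketch recording the response attributes on a span (the
+// span variable is assumed, not part of this package; values are illustrative
+// only):
+//
+//	span.SetAttributes(
+//		GenAIResponseID("chatcmpl-123"),
+//		GenAIResponseModel("gpt-4-0613"),
+//	)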
+func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") +) + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.system +var ( + // OpenAI + // Stability: development + GenAISystemOpenAI = GenAISystemKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") + // Vertex AI + // Stability: development + GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") + // Gemini + // Stability: development + GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") + // Deprecated: Use 'gcp.vertex_ai' instead. + GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") + // Deprecated: Use 'gcp.gemini' instead. 
+ GenAISystemGemini = GenAISystemKey.String("gemini") + // Anthropic + // Stability: development + GenAISystemAnthropic = GenAISystemKey.String("anthropic") + // Cohere + // Stability: development + GenAISystemCohere = GenAISystemKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") + // Azure OpenAI + // Stability: development + GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai") + // IBM Watsonx AI + // Stability: development + GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") + // AWS Bedrock + // Stability: development + GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") + // Perplexity + // Stability: development + GenAISystemPerplexity = GenAISystemKey.String("perplexity") + // xAI + // Stability: development + GenAISystemXai = GenAISystemKey.String("xai") + // DeepSeek + // Stability: development + GenAISystemDeepseek = GenAISystemKey.String("deepseek") + // Groq + // Stability: development + GenAISystemGroq = GenAISystemKey.String("groq") + // Mistral AI + // Stability: development + GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Deprecated: Replaced by `output`. + GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
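+//
+// A hypothetical sketch pairing it with the country code, using the example
+// values documented above:
+//
+//	attrs := []attribute.KeyValue{
+//		GeoCountryISOCode("CA"),
+//		GeoRegionISOCode("CA-QC"),
+//	}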
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
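+//
+// A minimal sketch (span is assumed to be a trace.Span; the operation name
+// mirrors the documented example):
+//
+//	span.SetAttributes(
+//		GraphQLOperationName("findBookById"),
+//		GraphQLOperationTypeQuery,
+//	)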
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. 
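+//
+// A hypothetical host-resource sketch combining constructors and enum values
+// from this namespace, using the example values documented above:
+//
+//	attrs := []attribute.KeyValue{
+//		HostName("opentelemetry-test"),
+//		HostType("n1-standard-1"),
+//		HostArchAMD64,
+//	}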
+func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. 
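+	//
+	// For example, the fallback described above could be recorded as follows
+	// (a sketch; "QUERY" stands in for any method outside the known list):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		HTTPRequestMethodOther,
+	//		HTTPRequestMethodOriginal("QUERY"),
+	//	}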
+ // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. 
It represents the matched route, that is, the path template in + // the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/users/:userID?", "{controller}/{action}/{id?}" + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the [application root] if there is one. + // + // [application root]: /docs/http/http-spans.md#http-server-definitions + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of the +// request in bytes. This should be the total number of bytes sent over the wire, +// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, +// and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of the +// response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of the +// response in bytes. This should be the total number of bytes sent over the +// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. 
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. 
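+	//
+	// For example, paired with `hw.state` (an illustrative sketch echoing the
+	// note below):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		HwTypeTemperature,
+	//		HwStateDegraded,
+	//	}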
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: Describes the category of the hardware component for which `hw.state`
+	// is being reported. For example, `hw.type=temperature` along with
+	// `hw.state=degraded` would indicate that the temperature of the hardware
+	// component has been reported as `degraded`.
+	HwTypeKey = attribute.Key("hw.type")
+)
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+	return HwIDKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+	return HwNameKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+	return HwParentKey.String(val)
+}
+
+// Enum values for hw.state
+var (
+	// Ok
+	// Stability: development
+	HwStateOk = HwStateKey.String("ok")
+	// Degraded
+	// Stability: development
+	HwStateDegraded = HwStateKey.String("degraded")
+	// Failed
+	// Stability: development
+	HwStateFailed = HwStateKey.String("failed")
+)
+
+// Enum values for hw.type
+var (
+	// Battery
+	// Stability: development
+	HwTypeBattery = HwTypeKey.String("battery")
+	// CPU
+	// Stability: development
+	HwTypeCPU = HwTypeKey.String("cpu")
+	// Disk controller
+	// Stability: development
+	HwTypeDiskController = HwTypeKey.String("disk_controller")
+	// Enclosure
+	// Stability: development
+	HwTypeEnclosure = HwTypeKey.String("enclosure")
+	// Fan
+	// Stability: development
+	HwTypeFan = HwTypeKey.String("fan")
+	// GPU
+	// Stability: development
+	HwTypeGpu = HwTypeKey.String("gpu")
+	// Logical disk
+	// Stability: development
+	HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+	// Memory
+	// Stability: development
+	HwTypeMemory = HwTypeKey.String("memory")
+	// Network
+	// Stability: development
+	HwTypeNetwork = HwTypeKey.String("network")
+	// Physical disk
+	// Stability: development
+	HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+	// Power supply
+	// Stability: development
+	HwTypePowerSupply = HwTypeKey.String("power_supply")
+	// Tape drive
+	// Stability: development
+	HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+	// Temperature
+	// Stability: development
+	HwTypeTemperature = HwTypeKey.String("temperature")
+	// Voltage
+	// Stability: development
+	HwTypeVoltage = HwTypeKey.String("voltage")
+)
+
+// Namespace: ios
+const (
+	// IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+	// semantic conventions. It represents the state of the application.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: The iOS lifecycle states are defined in the
+	// [UIApplicationDelegate documentation], from which the `OS terminology`
+	// column values are derived.
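+	//
+	// Illustrative only (span is assumed to be a trace.Span):
+	//
+	//	span.SetAttributes(IOSAppStateBackground)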
+ // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. + // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. + // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. 
Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. 
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
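+//
+// A minimal usage sketch for the constructors in this namespace, assuming the
+// SDK's resource package and this package's SchemaURL constant are available;
+// the values are taken from the Examples above:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//		K8SDeploymentName("opentelemetry"),
+//	)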
+func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. +func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. 
+func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. 
It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. 
This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Logs from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "client-5", "myhost@8742@s8083jm"
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingConsumerGroupNameKey is the attribute Key conforming to the
+ // "messaging.consumer.group.name" semantic conventions. It represents the name
+ // of the consumer group with which a consumer is associated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-group", "indexer"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.consumer.group.name` is applicable and what it means in
+ // the context of that system.
+ MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the message
+ // destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MyQueue", "MyTopic"
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If the broker doesn't have such a notion,
+ // the destination name SHOULD uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to the
+ // "messaging.destination.partition.id" semantic conventions. It represents the
+ // identifier of the partition messages are sent to or received from, unique
+ // within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1"
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to
+ // the "messaging.destination.subscription.name" semantic conventions. It
+ // represents the name of the destination subscription from which a message is
+ // consumed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "subscription-a"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.destination.subscription.name` is applicable and what it
+ // means in the context of that system.
+ MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/customers/{customerId}"
+ // Note: Destination names could be constructed from templates. 
An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the message
+ // key. Message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`, the
+ // attribute MUST NOT be set. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or the uncompressed body size.
+ // If both sizes are known, the uncompressed body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or the uncompressed size. If
+ // both sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+ // represents the delay time level for a delay message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+ // expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents the
+ // message group, which is essential for FIFO messages. Messages that belong to
+ // the same message group are always processed one by one within the same
+ // consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. 
It represents the
+ // key(s) of the message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+ // describes the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge. 
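+ //
+ // A usage sketch: instrumentations typically set this attribute via the enum
+ // values defined below, e.g. `span.SetAttributes(MessagingSystemKafka)`,
+ // assuming an active trace.Span named `span`.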
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to the
+// "messaging.batch.message_count" semantic conventions. It represents the number
+// of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique identifier
+// for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingConsumerGroupName returns an attribute KeyValue conforming to the
+// "messaging.consumer.group.name" semantic conventions. It represents the name
+// of the consumer group with which a consumer is associated.
+func MessagingConsumerGroupName(val string) attribute.KeyValue {
+ return MessagingConsumerGroupNameKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the
+// "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be unnamed
+// or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming to
+// the "messaging.destination.partition.id" semantic conventions. It represents
+// the identifier of the partition messages are sent to or received from, unique
+// within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming
+// to the "messaging.destination.subscription.name" semantic conventions. It
+// represents the name of the destination subscription from which a message is
+// consumed.
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to the
+// "messaging.destination.template" semantic conventions. It represents the low
+// cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to the
+// "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. 
It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// key. Message keys in Kafka are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`, the
+// attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
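+//
+// A minimal usage sketch for the constructors in this namespace, assuming an
+// active trace.Span named `span`; the values are taken from the Examples
+// above:
+//
+//	span.SetAttributes(
+//		MessagingOperationName("send"),
+//		MessagingDestinationName("MyTopic"),
+//		MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+//	)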
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the RabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the
+// message group, which is essential for FIFO messages. Messages that belong to
+// the same message group are always processed one by one within the same
+// consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message id.
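+//
+// The constructor is variadic; a short sketch with the example values from
+// above:
+//
+//	attr := MessagingRocketMQMessageKeys("keyA", "keyB")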
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+ // Deprecated: Replaced by `process`.
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver")
+ // Deprecated: Replaced by `send`. 
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+ // the network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+ // Note: Connection states are defined as part of the [rfc9293].
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It describes more details
+ // regarding the connection.type. It may be the type of cell technology
+ // connection, but it could be used for describing details about a Wi-Fi
+ // connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. + // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. 
+func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
+ // B
+ // Stability: development
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ // Stability: development
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ // Stability: development
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ // Stability: development
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ // Stability: development
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ // Stability: development
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ // Stability: development
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ // Stability: development
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ // Stability: development
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ // Stability: development
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+ // wifi
+ // Stability: development
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ // Stability: development
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ // Stability: development
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ // Stability: development
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ // Stability: development
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+ // transmit
+ // Stability: development
+ NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+ // receive
+ // Stability: development
+ NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+ // TCP
+ // Stability: stable
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ // Stability: stable
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe.
+ // Stability: stable
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ // Stability: stable
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+ // QUIC
+ // Stability: stable
+ NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+ // IPv4
+ // Stability: stable
+ NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ // Stability: stable
+ NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
+
+// Namespace: oci
+const (
+ // OCIManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of the
+ // OCI image manifest. For container images specifically, this is the digest by
+ // which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+ // Note: Follows [OCI Image Manifest Specification], and specifically the
+ // [Digest property].
+ // An example can be found in [Example Image Manifest].
+ //
+ // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+ // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+ // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+ OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+ return OCIManifestDigestKey.String(val)
+}
+
+// Namespace: opentracing
+const (
+ // OpenTracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the parent-child
+ // Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+ // The parent Span depends on the child Span in some capacity
+ // Stability: development
+ OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ // Stability: development
+ OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
+
+// Namespace: os
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to be
+ // parsed) OS version information, like e.g. reported by `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version" semantic
+ // conventions. It represents the version string of the operating system as
+ // defined in [Version Attributes].
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, like e.g. reported by `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("z_os") +) + +// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `/` pattern, e.g. + // `batching_span_processor/0`. + // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. 
+ // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. + // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. 
+func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = 
+ OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// Namespace: peer
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+ // conventions. It represents the [`service.name`] of the remote service. SHOULD
+ // be equal to the actual `service.name` resource attribute of the remote
+ // service if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: AuthTokenCache
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux based systems, can be set to the zeroth
+ // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received by
+ // the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+ // used to launch the process as a single string representing the full command.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otelcol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It specifies whether the
+ // context switches for this data point were voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+ // build ID as retrieved by `go tool buildid `.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
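+
+ // Usage note (illustrative sketch, not part of the generated conventions):
+ // process.* attributes typically describe the telemetry-emitting process and
+ // are attached to a resource rather than to individual spans. Assuming the
+ // SDK resource package and that this package is imported as semconv:
+ //
+ //	res := resource.NewWithAttributes(
+ //		semconv.SchemaURL,
+ //		semconv.ProcessPID(os.Getpid()),
+ //		semconv.ProcessCommand("cmd/otelcol"),
+ //	)
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling specific build ID for executables. See the OTel specification for
+ // Profiles for more information.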
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux based systems, can be set to the target
+ // of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessTitleKey is the attribute Key conforming to the "process.title" + // semantic conventions. It represents the process title (proctitle). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cat /etc/hostname", "xfce4-session", "bash" + // Note: In many Unix-like systems, process title (proctitle), is the string + // that represents the name or command line of a running process, displayed by + // system monitoring tools like ps, top, and htop. + ProcessTitleKey = attribute.Key("process.title") + + // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" + // semantic conventions. It represents the effective user ID (EUID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" + // semantic conventions. It represents the username of the effective user of the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic + // conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily unique + // across all processes on the host but it is unique within the process + // namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") + + // ProcessWorkingDirectoryKey is the attribute Key conforming to the + // "process.working_directory" semantic conventions. It represents the working + // directory of the process. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid `.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
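+
+// Usage note (illustrative sketch, not part of the generated conventions): the
+// variadic ProcessCommandArgs constructor wraps its arguments in a single
+// string-slice attribute, e.g.:
+//
+//	kv := semconv.ProcessCommandArgs("cmd/otelcol", "--config=config.yaml")
+//	// kv.Value.AsStringSlice() == []string{"cmd/otelcol", "--config=config.yaml"}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.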
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. 
+// It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It describes the interpreter or
+ // compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
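+
+ // Usage note (illustrative sketch, not part of the generated conventions):
+ // a profiler would attach the most specific frame type it can determine,
+ // falling back to the generic native value; isGoFrame below is a
+ // hypothetical check:
+ //
+ //	frame := semconv.ProfileFrameTypeNative
+ //	if isGoFrame {
+ //		frame = semconv.ProfileFrameTypeGo
+ //	}
+
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.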
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go], + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com//docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int, string,
+ // `null` or missing (for notifications), value is expected to be cast to string
+ // for simplicity. Use empty string in case of `null` value. Omit entirely if
+ // this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
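+
+ // Usage note (illustrative sketch, not part of the generated conventions):
+ // rpc.message.* attributes are typically recorded on span events, one event
+ // per message, with a per-direction counter maintained by the
+ // instrumentation. Assuming this package is imported as semconv, span is a
+ // trace.Span, and "SENT" is the enum value defined by the conventions:
+ //
+ //	sentCount++
+ //	span.AddEvent("message", trace.WithAttributes(
+ //		semconv.RPCMessageIDKey.Int(sentCount),
+ //		semconv.RPCMessageTypeKey.String("SENT"),
+ //	))
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called,
+ // must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function.name` attribute may be used to store the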
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+	// conventions. It represents the full (logical) name of the service being
+	// called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myservice.EchoService
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing class.
+	// The `code.namespace` attribute may be used to store the latter (despite the
+	// attribute name, it may include a class name; e.g., class with method actually
+	// executing the call on the server side, RPC client stub class on the client
+	// side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+	// conventions. It represents a string identifying the remoting system. See
+	// below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+	return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+	return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since protocol allows id to be int, string, `null` or
+// missing (for notifications), value is expected to be cast to string for
+// simplicity. Use empty string in case of `null` value. Omit entirely if this is
+// a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+	return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+	return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+	return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`: one for sent messages and one for received messages.
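+//
+// Usage sketch (illustrative only; the per-direction counter, here a
+// hypothetical local variable "sentCount", is maintained by the
+// instrumentation and is not part of this API):
+//
+//	sentCount++
+//	attrs := []attribute.KeyValue{RPCMessageTypeSent, RPCMessageID(sentCount)}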
+func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = 
RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
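+//
+// Usage sketch (illustrative only; the attribute values are the documentation
+// examples above, and "attrs" is a hypothetical consumer-side variable):
+//
+//	attrs := []attribute.KeyValue{
+//		SecurityRuleName("BLOCK_DNS_over_TLS"),
+//		SecurityRuleLicense("Apache 2.0"),
+//	}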
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. 
+func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random Version 1 + // or Version 4 [RFC + // 4122] UUID, but are free to use an inherent unique ID as + // the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the purposes of + // identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`] file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we do + // not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it can't + // unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as + // they know the target address and + // port. + // + // [RFC + // 4122]: https://www.ietf.org/rfc/rfc4122.txt + // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" semantic + // conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "shoppingcart" + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. + // If `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + // + // [`process.executable.name`]: process.md + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. 
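+//
+// Usage sketch (illustrative only; both IDs are hypothetical): when a session
+// is renewed, the new and the previous IDs can be recorded together, e.g.
+//
+//	attrs := []attribute.KeyValue{
+//		SessionID("00112233-4455-6677-8899-aabbccddeeff"),
+//		SessionPreviousID("99887766-5544-3322-1100-aabbccddeeff"),
+//	}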
+func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the signalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number. 
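+//
+// Usage sketch (illustrative only; the values are the documentation examples
+// and "attrs" is a hypothetical consumer-side variable):
+//
+//	attrs := []attribute.KeyValue{
+//		SourceAddress("10.1.2.80"),
+//		SourcePort(3389),
+//	}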
+func SourcePort(val int) attribute.KeyValue {
+	return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+	// SystemCPULogicalNumberKey is the attribute Key conforming to the
+	// "system.cpu.logical_number" semantic conventions. It is deprecated; use
+	// `cpu.logical_number` instead.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1
+	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+	// SystemDeviceKey is the attribute Key conforming to the "system.device"
+	// semantic conventions. It represents the device identifier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "(identifier)"
+	SystemDeviceKey = attribute.Key("system.device")
+
+	// SystemFilesystemModeKey is the attribute Key conforming to the
+	// "system.filesystem.mode" semantic conventions. It represents the filesystem
+	// mode.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "rw, ro"
+	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+	// SystemFilesystemMountpointKey is the attribute Key conforming to the
+	// "system.filesystem.mountpoint" semantic conventions. It represents the
+	// filesystem mount path.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/mnt/data"
+	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+	// SystemFilesystemStateKey is the attribute Key conforming to the
+	// "system.filesystem.state" semantic conventions. It represents the filesystem
+	// state.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "used"
+	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+	// SystemFilesystemTypeKey is the attribute Key conforming to the
+	// "system.filesystem.type" semantic conventions. It represents the filesystem
+	// type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "ext4"
+	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+	// SystemMemoryStateKey is the attribute Key conforming to the
+	// "system.memory.state" semantic conventions. It represents the memory state.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "free", "cached"
+	SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+	// SystemPagingDirectionKey is the attribute Key conforming to the
+	// "system.paging.direction" semantic conventions. It represents the paging
+	// access direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "in"
+	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+	// SystemPagingStateKey is the attribute Key conforming to the
+	// "system.paging.state" semantic conventions. It represents the memory paging
+	// state.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "free"
+	SystemPagingStateKey = attribute.Key("system.paging.state")
+
+	// SystemPagingTypeKey is the attribute Key conforming to the
+	// "system.paging.type" semantic conventions. It represents the memory paging
+	// type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "minor"
+	SystemPagingTypeKey = attribute.Key("system.paging.type")
+
+	// SystemProcessStatusKey is the attribute Key conforming to the
+	// "system.process.status" semantic conventions. It represents the process
+	// state, e.g., [Linux Process State Codes].
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "running"
+	//
+	// [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+	SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It is deprecated; use
+// `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+	return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+	return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+	// used
+	// Stability: development
+	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+	// free
+	// Stability: development
+	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+	// reserved
+	// Stability: development
+	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+	// fat32
+	// Stability: development
+	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+	// exfat
+	// Stability: development
+	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+	// ntfs
+	// Stability: development
+	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+	// refs
+	// Stability: development
+	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+	// hfsplus
+	// Stability: development
+	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+	// ext4
+	// Stability: development
+	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+	// used
+	// Stability: development
+	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+	// free
+	// Stability: development
+	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+	// Deprecated: Removed, report shared memory usage with
+	// `metric.system.memory.shared` metric.
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. 
It represents the fully qualified human readable name + // of the [test case]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). 
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+	// conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: main
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// Namespace: tls
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+	// conventions. It represents the string indicating the [cipher] used during the
+	// current connection.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+	// "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+	// Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+	// of the [registered TLS Cipher Suites].
+	//
+	// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+	// [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+	// stand-alone certificate offered by the client. This is usually
+	// mutually-exclusive of `client.certificate_chain` since this value also exists
+	// in that list.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII..."
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the array
+	// of PEM-encoded certificates that make up the certificate chain offered by the
+	// client. This is usually mutually-exclusive of `client.certificate` since that
+	// value should be the first certificate in the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII...", "MI..."
+	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+	// TLSClientHashMd5Key is the attribute Key conforming to the
+	// "tls.client.hash.md5" semantic conventions. It represents the certificate
+	// fingerprint using the MD5 digest of DER-encoded version of certificate
+	// offered by the client. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+	// TLSClientHashSha1Key is the attribute Key conforming to the
+	// "tls.client.hash.sha1" semantic conventions.
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." 
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the array
+	// of PEM-encoded certificates that make up the certificate chain offered by the
+	// server. This is usually mutually-exclusive of `server.certificate` since that
+	// value should be the first certificate in the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII...", "MI..."
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the certificate
+	// fingerprint using the MD5 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+	// fingerprint using the SHA1 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+	// fingerprint using the SHA256 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+	// semantic conventions. It represents the distinguished name of [subject] of
+	// the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+	//
+	// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+	// semantic conventions. It represents a hash that identifies servers based on
+	// how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "d4e5b18d6b55c71272893221c96ba240"
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/Time
+	// indicating when server certificate is no longer considered valid.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. 
It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions.
It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. 
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI query] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "q=OpenTelemetry"
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `q=OpenTelemetry&sig=REDACTED`.
+ //
+ // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.com", "foo.co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ // For example, the registered domain for `foo.example.com` is `example.com`.
+ // Trying to approximate this by simply taking the last two labels will not work
+ // well for TLDs such as `co.uk`.
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+ // conventions. It represents the [URI scheme] component identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https", "ftp", "telnet"
+ //
+ // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name; it includes all of the names except the host name
+ // under the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain contains
+ // all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "east", "sub2.sub1"
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+ // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+ // subdomain field should contain `sub2.sub1`, with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+ // conventions. It represents the low-cardinality template of an
+ // [absolute path reference].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+ //
+ // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective top
+ // level domain (eTLD), also known as the domain suffix; it is the last part of
+ // the domain name. For example, the top level domain for example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com", "co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name; it includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix; it is the last part of
+// the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
+
+// Namespace: user
+const (
+ // UserEmailKey is the attribute Key conforming to the "user.email" semantic
+ // conventions. It represents the user email address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein@example.com"
+ UserEmailKey = attribute.Key("user.email")
+
+ // UserFullNameKey is the attribute Key conforming to the "user.full_name"
+ // semantic conventions. It represents the user's full name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Albert Einstein"
+ UserFullNameKey = attribute.Key("user.full_name")
+
+ // UserHashKey is the attribute Key conforming to the "user.hash" semantic
+ // conventions. It represents the unique user hash to correlate information for
+ // a user in anonymized form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+ // Note: Useful if `user.id` or `user.name` contain confidential information and
+ // cannot be used.
+ UserHashKey = attribute.Key("user.hash")
+
+ // UserIDKey is the attribute Key conforming to the "user.id" semantic
+ // conventions. It represents the unique identifier of the user.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. +func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2"
+ //
+ // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentOSNameKey is the attribute Key conforming to the
+ // "user_agent.os.name" semantic conventions. It represents the human readable
+ // operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ // Note: For mapping user agent strings to OS names, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+ // UserAgentOSVersionKey is the attribute Key conforming to the
+ // "user_agent.os.version" semantic conventions. It represents the version
+ // string of the operating system as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ // Note: For mapping user agent strings to OS versions, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+ // UserAgentSyntheticTypeKey is the attribute Key conforming to the
+ // "user_agent.synthetic.type" semantic conventions. It specifies the category
+ // of synthetic traffic, such as tests or bots.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute MAY be derived from the contents of the
+ // `user_agent.original` attribute. Components that populate the attribute are
+ // responsible for determining what they consider to be synthetic bot or test
+ // traffic. This attribute can either be set for self-identification purposes,
+ // or on telemetry detected to be generated as a result of a synthetic request.
+ // This attribute is useful for distinguishing between genuine client traffic
+ // and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of the
+ // user-agent extracted from original. Usually refers to the browser's version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.1.2", "1.0.0"
+ // Note: [Example] of extracting browser's version from original string. In the
+ // case of using a user-agent for non-browser products, such as microservices
+ // with multiple names/versions inside the `user_agent.original`, the most
+ // significant version SHOULD be selected. In such a scenario it should align
+ // with `user_agent.name`
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions.
It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `head` refers to where you are right now; the current reference at a + // given time.The revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.head.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") + + // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") + + // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic + // conventions. It represents the type of the [reference] in the repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. 
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. 
+ // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. + // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // Deprecated: Replaced by `gitea`. + VCSProviderNameGittea = VCSProviderNameKey.String("gittea") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" +// semantic conventions. It represents the name of the web engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the web +// engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go new file mode 100644 index 00000000000..2c5c7ebd041 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.34.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go new file mode 100644 index 00000000000..88a998f1e56 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go new file mode 100644 index 00000000000..3c23d459254 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.34.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
index d90af8f673c..f3aa398138e 100644
--- a/vendor/go.opentelemetry.io/otel/trace/auto.go
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -20,7 +20,7 @@ import (

 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
-	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
 	"go.opentelemetry.io/otel/trace/embedded"
 	"go.opentelemetry.io/otel/trace/internal/telemetry"
 )
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ac3c0b15da2..7afe92b5981 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"

 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.36.0"
+	return "1.37.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 79f82f3d058..9d4742a1764 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,13 +3,12 @@
 module-sets:
   stable-v1:
-    version: v1.36.0
+    version: v1.37.0
     modules:
       - go.opentelemetry.io/otel
       - go.opentelemetry.io/otel/bridge/opencensus
       - go.opentelemetry.io/otel/bridge/opencensus/test
       - go.opentelemetry.io/otel/bridge/opentracing
-      - go.opentelemetry.io/otel/bridge/opentracing/test
       - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
       - go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -23,14 +22,16 @@ module-sets:
       - go.opentelemetry.io/otel/sdk/metric
       - go.opentelemetry.io/otel/trace
   experimental-metrics:
-    version: v0.58.0
+    version: v0.59.0
     modules:
       - go.opentelemetry.io/otel/exporters/prometheus
   experimental-logs:
-    version: v0.12.0
+    version: v0.13.0
     modules:
       - go.opentelemetry.io/otel/log
+      - go.opentelemetry.io/otel/log/logtest
       - go.opentelemetry.io/otel/sdk/log
+      - go.opentelemetry.io/otel/sdk/log/logtest
       - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
       - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
@@ -40,6 +41,4 @@ module-sets:
       - go.opentelemetry.io/otel/schema
 excluded-modules:
   - go.opentelemetry.io/otel/internal/tools
-  - go.opentelemetry.io/otel/log/logtest
-  - go.opentelemetry.io/otel/sdk/log/logtest
   - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 5d4096d46a0..df35bb9a882 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,19 @@ for general contribution guidelines.

 ## Maintainers (in alphabetical order)

-- [aranjans](https://github.com/aranjans), Google LLC
 - [arjan-bal](https://github.com/arjan-bal), Google LLC
 - [arvindbr8](https://github.com/arvindbr8), Google LLC
 - [atollena](https://github.com/atollena), Datadog, Inc.
- [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. - [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 0ad6bb1f220..360db08ebc1 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +var randIntN = rand.IntN + // ChildState is the balancer state of a child along with the endpoint which // identifies the child balancer. type ChildState struct { @@ -112,6 +114,21 @@ type endpointSharding struct { mu sync.Mutex } +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount. +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + // UpdateClientConnState creates a child for new endpoints and deletes children // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at @@ -133,7 +150,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. - for _, endpoint := range state.ResolverState.Endpoints { + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. 
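The copy-and-rotate step above is the interesting part of this hunk: rotateEndpoints shifts the endpoint list left by a random offset (the diff also hoists rand.IntN into the package-level randIntN variable, presumably so tests can substitute it), so each channel starts walking the resolver's endpoints from a different position, and it copies into a fresh slice so the caller's data is never mutated. A standalone sketch of the same logic; the Endpoint stand-in type and the rotateBy helper are illustrative, not part of the gRPC API:

package main

import (
	"fmt"
	"math/rand/v2"
)

// Endpoint stands in for resolver.Endpoint; the rotation only depends on
// slice order, not on the fields of the element type.
type Endpoint struct{ Addr string }

// rotateBy returns a copy of es rotated left by r positions (0 <= r < len(es)),
// leaving the input slice untouched.
func rotateBy(es []Endpoint, r int) []Endpoint {
	les := len(es)
	if les == 0 {
		return es
	}
	ret := make([]Endpoint, les)
	copy(ret, es[r:])         // the tail of the input moves to the front
	copy(ret[les-r:], es[:r]) // the head fills the remaining slots
	return ret
}

func main() {
	es := []Endpoint{{"a"}, {"b"}, {"c"}, {"d"}}
	fmt.Println(rotateBy(es, rand.IntN(len(es)))) // e.g. [{c} {d} {a} {b}]
}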
@@ -279,7 +296,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.IntN(len(pickers))), + next: uint32(randIntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index e62047256af..67f315a0dbc 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -67,21 +67,21 @@ var ( disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.disconnections", Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", + Unit: "{disconnection}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_succeeded", Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_failed", Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index cd3eaf8ddcb..3f762285db7 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -208,7 +208,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) @@ -1076,13 +1076,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. 
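One detail in the pick_first metrics hunk above: the Unit strings change from bare words like "attempt" to "{attempt}". In UCUM, the unit syntax OpenTelemetry's semantic conventions follow, curly braces mark an annotation, i.e. a count of things rather than a true measurement unit such as "s" or "By". A minimal sketch of the same convention using the public OpenTelemetry metrics API instead of gRPC's internal expstats registry (the meter and instrument names here are invented for illustration):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/pickfirst")

	// "{attempt}" is a UCUM annotation: it documents what is being counted
	// without claiming to be a physical unit.
	attempts, err := meter.Int64Counter(
		"connection_attempts",
		metric.WithDescription("Number of connection attempts."),
		metric.WithUnit("{attempt}"),
	)
	if err != nil {
		panic(err)
	}
	attempts.Add(context.Background(), 1)
}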
@@ -1831,7 +1824,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index a63ab606e66..c8e337cdda0 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -96,10 +96,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +110,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. ServerName string } @@ -173,12 +183,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. 
OverrideServerName(string) error } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 20f65f7bd95..8277be7d6f8 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -110,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -259,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -271,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index ec0ca89ccdc..7a5ac2e7c49 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -608,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. 
+// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 2fdaed88dbd..7e060f5ed13 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,26 +26,32 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). + EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. If this is unset or is false, only the first - // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used // instead of the exiting pickfirst implementation. This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 3ac798e8e60..2699223a27f 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -182,35 +182,6 @@ var ( // other features, including the CSDS service. NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. - // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. 
This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). - // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() - - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. - // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). - // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95d0d..ada5251cff3 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -181,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a2d2a798d48..aa52bfe95fd 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. 
- pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index b84ef26d46d..8e6af9514b6 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -332,6 +332,11 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 70fe23f5502..1da2a542acd 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1598,6 +1598,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index baf7740efba..10bf998aa5b 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -64,15 +64,21 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } +// IsClient indicates DelayedPickComplete is available on the client. +func (*DelayedPickComplete) IsClient() bool { return true } -func (*PickerUpdated) isRPCStats() {} +func (*DelayedPickComplete) isRPCStats() {} + +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete // InPayload contains stats about an incoming payload. 
type InPayload struct { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index ca6948926f9..d9bbd4c57cf 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -469,8 +469,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -481,6 +482,11 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } + if pick.blocked { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) + } + } return nil } @@ -1580,6 +1586,7 @@ type serverStream struct { s *transport.ServerStream p *parser codec baseCodec + desc *StreamDesc compressorV0 Compressor compressorV1 encoding.Compressor @@ -1588,6 +1595,8 @@ type serverStream struct { sendCompressorName string + recvFirstMsg bool // set after the first message is received + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1774,6 +1783,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs. + if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { @@ -1781,6 +1794,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } return toRPCErr(err) } + ss.recvFirstMsg = true if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ @@ -1800,7 +1814,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 8b0e5f973d6..bc1eb290f69 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.74.2"
+const Version = "1.75.0"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c366991f838..db5ef47fd51 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -263,8 +263,8 @@ github.com/gemalto/kmip-go/ttlv
 # github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
 ## explicit
 github.com/ghodss/yaml
-# github.com/go-jose/go-jose/v4 v4.0.5
-## explicit; go 1.21
+# github.com/go-jose/go-jose/v4 v4.1.1
+## explicit; go 1.23.0
 github.com/go-jose/go-jose/v4
 github.com/go-jose/go-jose/v4/cipher
 github.com/go-jose/go-jose/v4/json
@@ -494,7 +494,7 @@ go.opentelemetry.io/auto/sdk/internal/telemetry
 ## explicit; go 1.22.7
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/otel v1.36.0
+# go.opentelemetry.io/otel v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
@@ -506,12 +506,13 @@ go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
 go.opentelemetry.io/otel/semconv/v1.17.0
 go.opentelemetry.io/otel/semconv/v1.26.0
-# go.opentelemetry.io/otel/metric v1.36.0
+go.opentelemetry.io/otel/semconv/v1.34.0
+# go.opentelemetry.io/otel/metric v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/trace v1.36.0
+# go.opentelemetry.io/otel/trace v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
@@ -589,10 +590,10 @@ golang.org/x/time/rate
 # gomodules.xyz/jsonpatch/v2 v2.4.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7
 ## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.74.2
+# google.golang.org/grpc v1.75.0
 ## explicit; go 1.23.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
From b8a07b6ce8b0102b4de7296841658bbe2b1b73cb Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Thu, 21 Aug 2025 12:26:03 +0200
Subject: [PATCH 036/157] build: use Go 1.24.4

While building Ceph-CSI the Go toolchain always downloads Go 1.24.4. It
saves time (and bandwidth) when that version is installed from the
start.

Dependabot seems to have removed the toolchain directive, so maybe the
automatic downloading does not (always) happen anymore.

Signed-off-by: Niels de Vos
---
 build.env | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build.env b/build.env
index d36f2c4e8d7..3c3e76e62e0 100644
--- a/build.env
+++ b/build.env
@@ -26,7 +26,7 @@ BASE_IMAGE=quay.io/ceph/ceph:v19.2.2
 CEPH_VERSION=squid

 # standard Golang options
-GOLANG_VERSION=1.24.2
+GOLANG_VERSION=1.24.4
 GO111MODULE=on

 # commitlint version
From 32975b7c95755f520912fdbdba7e91c6851868ed Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Thu, 21 Aug 2025 12:47:36 +0200
Subject: [PATCH 037/157] build: remove `ceph_preview` build-tag

go-ceph v0.35 contains all APIs that are needed to build Ceph-CSI;
there is no need for unreleased APIs that are exposed only with the
`ceph_preview` tag.
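As background for this change: go-ceph compiles its preview APIs only when the
`ceph_preview` build tag is set, via Go build constraints, so dropping the tag
from GO_TAGS_LIST in the Makefile below makes the build fail early if any code
still calls an unreleased API. A minimal sketch of the gating mechanism; the
package and function are hypothetical, not actual go-ceph symbols:

//go:build ceph_preview

package mylib

// PreviewAPI is compiled only when `go build -tags ceph_preview` is used;
// without the tag this whole file is excluded from the build.
func PreviewAPI() string { return "preview" }

With the tag removed, a build now needs only the Ceph version tag, e.g.
`go build -tags squid ./...`.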
Signed-off-by: Niels de Vos --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 7d5bc994ca8..659154deed0 100644 --- a/Makefile +++ b/Makefile @@ -52,8 +52,8 @@ endif GO_PROJECT=github.com/ceph/ceph-csi CEPH_VERSION ?= $(shell . $(CURDIR)/build.env ; echo $${CEPH_VERSION}) -# TODO: ceph_preview tag required for FSQuiesce API -GO_TAGS_LIST ?= $(CEPH_VERSION) ceph_preview +# The 'ceph_preview' tag may be needed for unstable/unreleased APIs. +GO_TAGS_LIST ?= $(CEPH_VERSION) # CephCSI currently has 4 modules in these directories. GO_MODULES_LIST = ./ e2e/ api/ actions/retest From b39e9fd6cd21bab2b3e96d86a2bcef37368b1c22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 21:41:04 +0000 Subject: [PATCH 038/157] rebase: bump k8s.io/api in /api in the k8s-dependencies group Bumps the k8s-dependencies group in /api with 1 update: [k8s.io/api](https://github.com/kubernetes/api). Updates `k8s.io/api` from 0.33.3 to 0.33.4 - [Commits](https://github.com/kubernetes/api/compare/v0.33.3...v0.33.4) --- updated-dependencies: - dependency-name: k8s.io/api dependency-version: 0.33.4 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-dependencies ... Signed-off-by: dependabot[bot] --- api/go.mod | 4 ++-- api/go.sum | 8 ++++---- api/vendor/modules.txt | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/go.mod b/api/go.mod index f62c0279fe2..41174931474 100644 --- a/api/go.mod +++ b/api/go.mod @@ -8,7 +8,7 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/openshift/api v0.0.0-20240115183315-0793e918179d github.com/stretchr/testify v1.10.0 - k8s.io/api v0.33.3 + k8s.io/api v0.33.4 ) require ( @@ -27,7 +27,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apimachinery v0.33.3 // indirect + k8s.io/apimachinery v0.33.4 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect diff --git a/api/go.sum b/api/go.sum index 05a889ccf7e..54986c30da5 100644 --- a/api/go.sum +++ b/api/go.sum @@ -83,10 +83,10 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= -k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= -k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= +k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= diff --git a/api/vendor/modules.txt b/api/vendor/modules.txt index 
f91f85b5629..6f80007773d 100644 --- a/api/vendor/modules.txt +++ b/api/vendor/modules.txt @@ -61,12 +61,12 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.33.3 +# k8s.io/api v0.33.4 ## explicit; go 1.24.0 k8s.io/api/core/v1 k8s.io/api/rbac/v1 k8s.io/api/storage/v1 -# k8s.io/apimachinery v0.33.3 +# k8s.io/apimachinery v0.33.4 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/operation k8s.io/apimachinery/pkg/api/resource From 59f27a4ce93da7a0a9501d0121b7d3d8880482e9 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Fri, 29 Aug 2025 15:12:19 +0530 Subject: [PATCH 039/157] rebase: bump k8s-dependencies group to 0.33.4 This commit updates following packages to v0.33.4: - k8s.io/api - k8s.io/apimachinery - k8s.io/cloud-provider - k8s.io/mount-utils - k8s.io/client-go Signed-off-by: Praveen M --- go.mod | 20 ++++++++++---------- go.sum | 42 ++++++++++++++++++++---------------------- vendor/modules.txt | 22 +++++++++++----------- 3 files changed, 41 insertions(+), 43 deletions(-) diff --git a/go.mod b/go.mod index b6cdf3b7c53..196e8b1b5d6 100644 --- a/go.mod +++ b/go.mod @@ -34,12 +34,12 @@ require ( golang.org/x/sys v0.35.0 google.golang.org/grpc v1.75.0 google.golang.org/protobuf v1.36.7 - k8s.io/api v0.33.3 - k8s.io/apimachinery v0.33.3 - k8s.io/cloud-provider v0.33.0 + k8s.io/api v0.33.4 + k8s.io/apimachinery v0.33.4 + k8s.io/cloud-provider v0.33.4 k8s.io/klog/v2 v2.130.1 k8s.io/kubernetes v1.33.4 - k8s.io/mount-utils v0.33.3 + k8s.io/mount-utils v0.33.4 k8s.io/utils v0.0.0-20241210054802-24370beab758 ) @@ -49,7 +49,7 @@ require ( sigs.k8s.io/controller-runtime v0.21.0 ) -replace k8s.io/client-go => k8s.io/client-go v0.33.0 +replace k8s.io/client-go => k8s.io/client-go v0.33.4 exclude ( // missing tag, referred to by github.com/hashicorp/go-kms-wrapping@v0.5.1 @@ -152,11 +152,11 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.33.1 // indirect - k8s.io/apiserver v0.33.1 // indirect - k8s.io/component-base v0.33.1 // indirect - k8s.io/controller-manager v0.33.0 // indirect - k8s.io/csi-translation-lib v0.33.0 // indirect + k8s.io/apiextensions-apiserver v0.33.4 // indirect + k8s.io/apiserver v0.33.4 // indirect + k8s.io/component-base v0.33.4 // indirect + k8s.io/controller-manager v0.33.4 // indirect + k8s.io/csi-translation-lib v0.33.4 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/randfill v1.0.0 // indirect diff --git a/go.sum b/go.sum index d9990b979c8..3b0bb687158 100644 --- a/go.sum +++ b/go.sum @@ -1356,27 +1356,25 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= -k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= -k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= -k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= -k8s.io/apiextensions-apiserver v0.33.1 h1:N7ccbSlRN6I2QBcXevB73PixX2dQNIW0ZRuguEE91zI= -k8s.io/apiextensions-apiserver v0.33.1/go.mod h1:uNQ52z1A1Gu75QSa+pFK5bcXc4hq7lpOXbweZgi4dqA= +k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= +k8s.io/api v0.33.4/go.mod 
h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= +k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= +k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.1 h1:yLgLUPDVC6tHbNcw5uE9mo1T6ELhJj7B0geifra3Qdo= -k8s.io/apiserver v0.33.1/go.mod h1:VMbE4ArWYLO01omz+k8hFjAdYfc3GVAYPrhP2tTKccs= -k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= -k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= -k8s.io/cloud-provider v0.33.0 h1:nVU2Q9QK7O50yaNx+pE61oDPqflsSsKygN43f5js9+I= -k8s.io/cloud-provider v0.33.0/go.mod h1:2reyEBbsimZJKHF325vxLBD5fcJGNeJHeLjJ+jGM8Qg= -k8s.io/component-base v0.33.1 h1:EoJ0xA+wr77T+G8p6T3l4efT2oNwbqBVKR71E0tBIaI= -k8s.io/component-base v0.33.1/go.mod h1:guT/w/6piyPfTgq7gfvgetyXMIh10zuXA6cRRm3rDuY= -k8s.io/controller-manager v0.33.0 h1:O9LnTjffOe62d66gMcKLuPXsBjY5sqETWEIzg+DVL8w= -k8s.io/controller-manager v0.33.0/go.mod h1:vQwAQnroav4+UyE2acW1Rj6CSsHPzr2/018kgRLYqlI= -k8s.io/csi-translation-lib v0.33.0 h1:kW3xVPCTXmHmK5v/8PEVZCUZSdNRndiQat0SbN31qEM= -k8s.io/csi-translation-lib v0.33.0/go.mod h1:Ldx85t1WxFStKQ2p5Xaimv39IkTc7/m8yNMkt8MlyoQ= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y= +k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok= +k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= +k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +k8s.io/cloud-provider v0.33.4 h1:et4DyeV0W8W+m2ByS34VVFMg8Aj0sz+UDVwanNkspTo= +k8s.io/cloud-provider v0.33.4/go.mod h1:cAC2s7mGpqVWwUars8TFgnvgXy+trDOF3+WSeKNsy/M= +k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= +k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= +k8s.io/controller-manager v0.33.4 h1:HmlzmmNPu8H+cKEpAIRz0ptqpveKcj7KrCx9G+HXRAg= +k8s.io/controller-manager v0.33.4/go.mod h1:CpO8RarLcs7zh0sE4pqz88quF3xU3Dc4ZDfshnB8hw4= +k8s.io/csi-translation-lib v0.33.4 h1:LmiElxqQwISv0c2mdL3rswmPIIN6Qh+4Lv0bdKTTFoM= +k8s.io/csi-translation-lib v0.33.4/go.mod h1:A4Kn6gTWX5EkxbHgtiDitNjDVvk2plie7lo8Hpa19Bg= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -1389,8 +1387,8 @@ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUy k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/kubernetes v1.33.4 h1:T1d5FLUYm3/KyUeV7YJhKTR980zHCHb7K2xhCSo3lE8= k8s.io/kubernetes v1.33.4/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00= -k8s.io/mount-utils v0.33.3 h1:Q1jsnqdS4LdtJSYSXgiQv/XNrRHQncLk3gMYjKNSZrE= -k8s.io/mount-utils v0.33.3/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= +k8s.io/mount-utils v0.33.4 
h1:o83Qx0AgY5JDYhFV6gAYfSy+GAPIuPqSKdEqac5lxqo= +k8s.io/mount-utils v0.33.4/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/vendor/modules.txt b/vendor/modules.txt index db5ef47fd51..28393a8be09 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -708,7 +708,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.33.3 +# k8s.io/api v0.33.4 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -769,12 +769,12 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.33.1 +# k8s.io/apiextensions-apiserver v0.33.4 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.33.3 +# k8s.io/apimachinery v0.33.4 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -832,11 +832,11 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.33.1 +# k8s.io/apiserver v0.33.4 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/util/feature -# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.33.0 +# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.33.4 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1111,11 +1111,11 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.33.0 +# k8s.io/cloud-provider v0.33.4 ## explicit; go 1.24.0 k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.33.1 +# k8s.io/component-base v0.33.4 ## explicit; go 1.24.0 k8s.io/component-base/featuregate k8s.io/component-base/metrics @@ -1124,10 +1124,10 @@ k8s.io/component-base/metrics/prometheus/feature k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/version k8s.io/component-base/zpages/features -# k8s.io/controller-manager v0.33.0 +# k8s.io/controller-manager v0.33.4 ## explicit; go 1.24.0 k8s.io/controller-manager/pkg/features -# k8s.io/csi-translation-lib v0.33.0 +# k8s.io/csi-translation-lib v0.33.4 ## explicit; go 1.24.0 # k8s.io/klog/v2 v2.130.1 ## explicit; go 1.18 @@ -1162,7 +1162,7 @@ k8s.io/kubernetes/pkg/volume/util/hostutil k8s.io/kubernetes/pkg/volume/util/recyclerclient k8s.io/kubernetes/pkg/volume/util/subpath k8s.io/kubernetes/pkg/volume/util/types -# k8s.io/mount-utils v0.33.3 +# k8s.io/mount-utils v0.33.4 ## explicit; go 1.24.0 k8s.io/mount-utils # k8s.io/utils v0.0.0-20241210054802-24370beab758 @@ -1243,4 +1243,4 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 # github.com/ceph/ceph-csi/api => ./api -# k8s.io/client-go => k8s.io/client-go v0.33.0 +# k8s.io/client-go => k8s.io/client-go v0.33.4 From 8d7ec3a4b20d84197dbc575ea376bbd2240d4ebd Mon Sep 17 
00:00:00 2001
From: Niels de Vos
Date: Thu, 21 Aug 2025 15:08:44 +0200
Subject: [PATCH 040/157] nvmeof: cleanup resources when CreateVolume fails

When CreateVolume fails, all allocated resources (RBD-image and gateway
configuration) should be freed again.

Signed-off-by: Niels de Vos
---
 .../nvmeof/controller/controllerserver.go     | 37 ++++++++++++++++---
 1 file changed, 31 insertions(+), 6 deletions(-)

diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
index 36ebddae75b..18db99ce608 100644
--- a/internal/nvmeof/controller/controllerserver.go
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -80,7 +80,6 @@ func (cs *Server) CreateVolume(
 	req *csi.CreateVolumeRequest,
 ) (*csi.CreateVolumeResponse, error) {
 	// TODO - need to check and modify the request to ensure it has the required fields NvmeOF supports (like nfs does)
-	// TODO - in case of failure, we should clean up the created resources (RBD volume, subsystem, etc.)
 	// TODO - move all hardcoded strings to constants

 	// Step 0: Validate request
@@ -106,6 +105,25 @@ func (cs *Server) CreateVolume(

 	backend := res.GetVolume()
 	volumeID := backend.GetVolumeId()
+
+	defer func() {
+		// skip cleanup if there was no error
+		if err == nil {
+			return
+		}
+
+		_, cleanupErr := cs.backendServer.DeleteVolume(
+			ctx,
+			&csi.DeleteVolumeRequest{
+				VolumeId: volumeID,
+				Secrets:  req.GetSecrets(),
+			},
+		)
+		if cleanupErr != nil {
+			log.ErrorLog(ctx, "failed to cleanup volume %q: %v", volumeID, cleanupErr)
+		}
+	}()
+
 	rbdImageName := res.GetVolume().GetVolumeContext()["imageName"]
 	rbdPoolName := res.GetVolume().GetVolumeContext()["pool"]

@@ -113,10 +131,20 @@ func (cs *Server) CreateVolume(
 	nvmeofData, err := createNVMeoFResources(ctx, req, rbdPoolName, rbdImageName)
 	if err != nil {
 		log.ErrorLog(ctx, "NVMe-oF resource setup failed for volumeID %s: %v", volumeID, err)
-		// TODO: Implement cleanup of RBD volume on NVMe-oF failure

 		return nil, status.Errorf(codes.Internal, "NVMe-oF setup failed: %v", err)
 	}
+	defer func() {
+		// skip cleanup if there was no error
+		if err == nil {
+			return
+		}
+
+		cleanupErr := cleanupNVMeoFResources(ctx, nvmeofData)
+		if cleanupErr != nil {
+			log.ErrorLog(ctx, "failed to cleanup NVMe-oF resources for volume %q: %v", volumeID, cleanupErr)
+		}
+	}()

 	// step 3: Populate volume context for NodeServer
 	populateVolumeContext(backend, nvmeofData)
@@ -153,7 +181,7 @@ func (cs *Server) DeleteVolume(
 		log.DebugLog(ctx, "No NVMe-oF metadata found, skipping NVMe-oF cleanup: %v", err)
 	} else {
 		// Clean up NVMe-oF resources
-		if err := cleanupNVMeoFResources(ctx, req, nvmeofData); err != nil {
+		if err := cleanupNVMeoFResources(ctx, nvmeofData); err != nil {
 			log.ErrorLog(ctx, "NVMe-oF cleanup failed (continuing with RBD deletion): %v", err)

 			return nil, status.Errorf(codes.Internal, "NVMe-oF cleanup failed: %v", err)
@@ -328,7 +356,6 @@ func cleanupEmptySubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient

 // createNVMeoFResources sets up the NVMe-oF resources for the given RBD volume.
 // TODO - need to support multiple listeners.
-// TODO - need to fallback cleanup if any step fails.
 func createNVMeoFResources(
 	ctx context.Context,
 	req *csi.CreateVolumeRequest,
@@ -413,7 +440,6 @@ func createNVMeoFResources(
 // This includes removing the host, listener, namespace, and potentially the subsystem.
 func cleanupNVMeoFResources(
 	ctx context.Context,
-	req *csi.DeleteVolumeRequest,
 	nvmeofData *nvmeof.NVMeoFVolumeData,
 ) error {
 	// Step 1: Connect to gateway using stored management address
@@ -453,7 +479,6 @@ func cleanupNVMeoFResources(
 	if err := cleanupEmptySubsystem(ctx, gateway, nvmeofData.SubsystemNQN); err != nil {
 		return fmt.Errorf("failed to cleanup empty subsystem %s: %w", nvmeofData.SubsystemNQN, err)
 	}
-	log.DebugLog(ctx, "NVMe-oF resources cleaned up for volume with ID %s", req.GetVolumeId())

 	return nil
 }
From 06b0cc66d796ad5abcb0d5d71353ea1979eaa67c Mon Sep 17 00:00:00 2001
From: gadi-didi
Date: Mon, 1 Sep 2025 16:30:20 +0300
Subject: [PATCH 041/157] nvmeof: combine create/delete subsystem and listener

Combine them into one call.

Signed-off-by: gadi-didi
---
 .../nvmeof/controller/controllerserver.go     | 57 +++++++++++--------
 1 file changed, 34 insertions(+), 23 deletions(-)

diff --git a/internal/nvmeof/controller/controllerserver.go b/internal/nvmeof/controller/controllerserver.go
index 18db99ce608..853d4edbafa 100644
--- a/internal/nvmeof/controller/controllerserver.go
+++ b/internal/nvmeof/controller/controllerserver.go
@@ -305,7 +305,13 @@ func validatePublishVolumeRequest(req *csi.ControllerPublishVolumeRequest) error
 }

 // ensureSubsystem checks if the subsystem exists, and creates it if not.
-func ensureSubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient, subsystemNQN string) error {
+// It then creates the listener.
+func ensureSubsystem(
+	ctx context.Context,
+	gateway *nvmeof.GatewayRpcClient,
+	subsystemNQN string,
+	listenerInfo nvmeof.ListenerDetails,
+) error {
 	exists, err := gateway.SubsystemExists(ctx, subsystemNQN)
 	if err != nil {
 		return err
@@ -314,12 +320,23 @@ func ensureSubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient, subs
 		return nil
 	}
 	// Create if doesn't exist (controller decision)
+	err = gateway.CreateSubsystem(ctx, subsystemNQN)
+	if err != nil {
+		return err
+	}

-	return gateway.CreateSubsystem(ctx, subsystemNQN)
+	// Create listeners
+
+	return gateway.CreateListener(ctx, subsystemNQN, listenerInfo)
 }

-// cleanupEmptySubsystem checks if the subsystem is empty (no namespaces), if so, deletes it.
-func cleanupEmptySubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient, subsystemNQN string,
+// cleanupEmptySubsystem checks if the subsystem is empty (no namespaces); if so,
+// it first deletes the listener and then deletes the subsystem.
+func cleanupEmptySubsystem(
+	ctx context.Context,
+	gateway *nvmeof.GatewayRpcClient,
+	subsystemNQN string,
+	listenerInfo nvmeof.ListenerDetails,
 ) error {
 	if subsystemNQN == "" {
 		return nil
@@ -344,6 +361,13 @@ func cleanupEmptySubsystem(ctx context.Context, gateway *nvmeof.GatewayRpcClient
 		return nil
 	}
+
+	// subsystem is empty; delete the listener first
+	err = gateway.DeleteListener(ctx, subsystemNQN, listenerInfo)
+	if err != nil {
+		return fmt.Errorf("failed to delete listener for subsystem %s: %w", subsystemNQN, err)
+	}
+
 	log.DebugLog(ctx, "Subsystem %s is empty, deleting", subsystemNQN)
 	err = gateway.DeleteSubsystem(ctx, subsystemNQN)
 	if err != nil {
@@ -406,20 +430,15 @@ func createNVMeoFResources(
 		}
 	}()

-	// Step 3: Ensure subsystem exists
-	if err := ensureSubsystem(ctx, gateway, nvmeofData.SubsystemNQN); err != nil {
+	// Step 3: Ensure subsystem exists (and listener)
+	if err := ensureSubsystem(ctx, gateway, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo); err != nil {
 		return nil, fmt.Errorf("subsystem setup failed: %w", err)
 	}

-	// Step 4: Create listeners
-	err = gateway.CreateListener(ctx, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)
-	if err != nil {
-		return nil, fmt.Errorf("listener creation failed: %w", err)
-	}
-	log.DebugLog(ctx, "Listener created for subsystem %s at %s", nvmeofData.SubsystemNQN,
+	log.DebugLog(ctx, "subsystem %s and listener %s were created", nvmeofData.SubsystemNQN,
 		nvmeofData.ListenerInfo)

-	// Step 5: Create namespace and set its uuid
+	// Step 4: Create namespace and set its uuid
 	nsid, err := gateway.CreateNamespace(ctx, nvmeofData.SubsystemNQN, rbdPoolName, rbdImageName)
 	if err != nil {
 		return nil, fmt.Errorf("namespace creation failed: %w", err)
@@ -467,16 +486,8 @@ func createNVMeoFResources(
 	}
 	log.DebugLog(ctx, "Namespace %d deleted for subsystem %s", nvmeofData.NamespaceID, nvmeofData.SubsystemNQN)

-	// Step 3: Delete listener
-	err = gateway.DeleteListener(ctx, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo)
-	if err != nil {
-		return fmt.Errorf("failed to delete listener for subsystem %s: %w", nvmeofData.SubsystemNQN, err)
-	}
-	log.DebugLog(ctx, "Listener deleted for subsystem %s at %s", nvmeofData.SubsystemNQN,
-		nvmeofData.ListenerInfo)
-
-	// Step 4: Cleanup empty subsystem
-	if err := cleanupEmptySubsystem(ctx, gateway, nvmeofData.SubsystemNQN); err != nil {
+	// Step 3: Cleanup empty subsystem
+	if err := cleanupEmptySubsystem(ctx, gateway, nvmeofData.SubsystemNQN, nvmeofData.ListenerInfo); err != nil {
 		return fmt.Errorf("failed to cleanup empty subsystem %s: %w", nvmeofData.SubsystemNQN, err)
 	}
From 618c5fa4f9944611f0d389e481693d99f71b5220 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 26 Aug 2025 12:30:51 +0000
Subject: [PATCH 042/157] rebase: bump the github-dependencies group with 2
 updates

Bumps the github-dependencies group with 2 updates: [github.com/aws/aws-sdk-go-v2/service/sts](https://github.com/aws/aws-sdk-go-v2) and [github.com/stretchr/testify](https://github.com/stretchr/testify).
Updates `github.com/aws/aws-sdk-go-v2/service/sts` from 1.37.0 to 1.38.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.37.0...v1.38.0) Updates `github.com/stretchr/testify` from 1.10.0 to 1.11.0 - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.10.0...v1.11.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/sts dependency-version: 1.38.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: github-dependencies - dependency-name: github.com/stretchr/testify dependency-version: 1.11.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: github-dependencies ... Signed-off-by: dependabot[bot] --- go.mod | 12 +- go.sum | 23 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../internal/configsources/CHANGELOG.md | 4 + .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.go | 68 ++-- .../endpoints/awsrulesfn/partitions.json | 68 ++-- .../internal/endpoints/v2/CHANGELOG.md | 4 + .../endpoints/v2/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 4 + .../presigned-url/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 9 + .../service/sts/deserializers.go | 9 - .../service/sts/go_module_metadata.go | 2 +- .../testify/assert/assertion_compare.go | 22 +- .../testify/assert/assertion_format.go | 51 ++- .../testify/assert/assertion_forward.go | 102 +++-- .../testify/assert/assertion_order.go | 2 +- .../stretchr/testify/assert/assertions.go | 367 ++++++++++++------ .../github.com/stretchr/testify/assert/doc.go | 4 + .../testify/assert/http_assertions.go | 4 +- .../testify/assert/yaml/yaml_custom.go | 1 - .../testify/assert/yaml/yaml_default.go | 1 - .../stretchr/testify/assert/yaml/yaml_fail.go | 1 - .../stretchr/testify/require/doc.go | 2 + .../stretchr/testify/require/require.go | 108 ++++-- .../testify/require/require_forward.go | 102 +++-- vendor/modules.txt | 12 +- 28 files changed, 652 insertions(+), 338 deletions(-) diff --git a/go.mod b/go.mod index 196e8b1b5d6..76f2c2eb828 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0 github.com/IBM/keyprotect-go-client v0.15.1 github.com/aws/aws-sdk-go v1.55.8 - github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423 github.com/ceph/go-ceph v0.35.0 @@ -27,7 +27,7 @@ require ( github.com/libopenstorage/secrets v0.0.0-20231011182615-5f4b25ceede1 github.com/pkg/xattr v0.4.12 github.com/prometheus/client_golang v1.23.0 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.0 go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.41.0 golang.org/x/net v0.43.0 @@ -69,11 +69,11 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect github.com/ansel1/merry v1.6.2 // indirect github.com/ansel1/merry/v2 v2.0.1 // indirect - github.com/aws/aws-sdk-go-v2 v1.38.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect + 
github.com/aws/aws-sdk-go-v2 v1.38.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 // indirect github.com/aws/smithy-go v1.22.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect diff --git a/go.sum b/go.sum index 3b0bb687158..80b91b3de92 100644 --- a/go.sum +++ b/go.sum @@ -106,18 +106,18 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU= -github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 h1:o9RnO+YZ4X+kt5Z7Nvcishlz0nksIt2PIzDglLMP0vA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3/go.mod h1:+6aLJzOG1fvMOyzIySYjOFjcguGvVRL68R+uoRencN4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 h1:joyyUFhiTQQmVK6ImzNU9TQSNRNeD9kOklqTzyk5v6s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3/go.mod h1:+vNIyZQP3b3B1tSLI0lxvrU9cfM7gpdRXMFfm67ZcPc= +github.com/aws/aws-sdk-go-v2 v1.38.1 h1:j7sc33amE74Rz0M/PoCpsZQ6OunLqys/m5antM0J+Z8= +github.com/aws/aws-sdk-go-v2 v1.38.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 h1:IdCLsiiIj5YJ3AFevsewURCPV+YWUlOW8JiPhoAy8vg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4/go.mod h1:l4bdfCD7XyyZA9BolKBo1eLqgaJxl0/x91PL4Yqe0ao= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 h1:j7vjtr1YIssWQOMeOWRbh3z8g2oY/xPjnZH2gLY4sGw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4/go.mod h1:yDmJgqOiH4EA8Hndnv4KwAo8jCGTSnM5ASG1nBI+toA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 h1:ieRzyHXypu5ByllM7Sp4hC5f/1Fy5wqxqY0yB85hC7s= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3/go.mod h1:O5ROz8jHiOAKAwx179v+7sHMhfobFVi6nZt8DEyiYoM= -github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 h1:MG9VFW43M4A8BYeAfaJJZWrroinxeTi2r3+SnmLQfSA= -github.com/aws/aws-sdk-go-v2/service/sts v1.37.0/go.mod h1:JdeBDPgpJfuS6rU/hNglmOigKhyEZtBmbraLE4GK1J8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 h1:ueB2Te0NacDMnaC+68za9jLwkjzxGWm0KB5HTUHjLTI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4/go.mod h1:nLEfLnVMmLvyIG58/6gsSA03F1voKGaCfHV7+lR8S7s= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 h1:iV1Ko4Em/lkJIsoKyGfc0nQySi+v0Udxr6Igq+y9JZc= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.0/go.mod h1:bEPcjW7IbolPfK67G1nilqWyoxYMSPrDiIQ3RdIdKgo= github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= github.com/aws/smithy-go v1.22.5/go.mod 
h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -640,8 +640,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= +github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index dbbd7f0b954..89449f67b26 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.0" +const goModuleVersion = "1.38.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 17250d76aaa..48b7efd9d6c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.4.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.4.3 (2025-08-11) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 046d0023791..e304ef67cf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.4.3" +const goModuleVersion = "1.4.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 619c1f5d8f5..83e5ac62bf7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -297,32 +297,18 @@ var partitions = []Partition{ }, }, { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "api.amazonwebservices.eu", SupportsFIPS: true, - SupportsDualStack: true, - ImplicitGlobalRegion: 
"us-gov-west-1", + SupportsDualStack: false, + ImplicitGlobalRegion: "eusc-de-east-1", }, Regions: map[string]RegionOverrides{ - "aws-us-gov-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": { + "eusc-de-east-1": { Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, @@ -337,7 +323,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso", DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "api.aws.ic.gov", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-iso-east-1", @@ -372,7 +358,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-b", DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "api.aws.scloud", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-isob-east-1", @@ -400,7 +386,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-e", DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "api.cloud-aws.adc-e.uk", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "eu-isoe-west-1", @@ -428,7 +414,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-f", DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "api.aws.hci.ic.gov", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-isof-south-1", @@ -458,18 +444,32 @@ var partitions = []Partition{ }, }, { - ID: "aws-eusc", - RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-eusc", - DnsSuffix: "amazonaws.eu", - DualStackDnsSuffix: "amazonaws.eu", + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "eusc-de-east-1", + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", }, Regions: map[string]RegionOverrides{ - "eusc-de-east-1": { + "aws-us-gov-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": { Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index 456b07fca67..299cb220419 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -54,7 +54,7 @@ "description" : "Asia Pacific (Thailand)" }, "aws-global" : { - "description" : "AWS Standard global region" + "description" : "aws global region" }, "ca-central-1" : { "description" : "Canada (Central)" @@ -127,7 +127,7 @@ "regionRegex" : "^cn\\-\\w+\\-\\d+$", "regions" : { "aws-cn-global" : { - "description" : "AWS China global region" + "description" : "aws-cn global region" }, "cn-north-1" : { "description" : "China (Beijing)" @@ -137,32 +137,26 @@ } } }, { - "id" : "aws-us-gov", + "id" : "aws-eusc", "outputs" : 
{ - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-gov-west-1", - "name" : "aws-us-gov", - "supportsDualStack" : true, + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "api.amazonwebservices.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", + "supportsDualStack" : false, "supportsFIPS" : true }, - "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", "regions" : { - "aws-us-gov-global" : { - "description" : "AWS GovCloud (US) global region" - }, - "us-gov-east-1" : { - "description" : "AWS GovCloud (US-East)" - }, - "us-gov-west-1" : { - "description" : "AWS GovCloud (US-West)" + "eusc-de-east-1" : { + "description" : "EU (Germany)" } } }, { "id" : "aws-iso", "outputs" : { "dnsSuffix" : "c2s.ic.gov", - "dualStackDnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "api.aws.ic.gov", "implicitGlobalRegion" : "us-iso-east-1", "name" : "aws-iso", "supportsDualStack" : false, @@ -171,7 +165,7 @@ "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", "regions" : { "aws-iso-global" : { - "description" : "AWS ISO (US) global region" + "description" : "aws-iso global region" }, "us-iso-east-1" : { "description" : "US ISO East" @@ -184,7 +178,7 @@ "id" : "aws-iso-b", "outputs" : { "dnsSuffix" : "sc2s.sgov.gov", - "dualStackDnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "api.aws.scloud", "implicitGlobalRegion" : "us-isob-east-1", "name" : "aws-iso-b", "supportsDualStack" : false, @@ -193,7 +187,7 @@ "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", "regions" : { "aws-iso-b-global" : { - "description" : "AWS ISOB (US) global region" + "description" : "aws-iso-b global region" }, "us-isob-east-1" : { "description" : "US ISOB East (Ohio)" @@ -203,7 +197,7 @@ "id" : "aws-iso-e", "outputs" : { "dnsSuffix" : "cloud.adc-e.uk", - "dualStackDnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "api.cloud-aws.adc-e.uk", "implicitGlobalRegion" : "eu-isoe-west-1", "name" : "aws-iso-e", "supportsDualStack" : false, @@ -212,7 +206,7 @@ "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { "aws-iso-e-global" : { - "description" : "AWS ISOE (Europe) global region" + "description" : "aws-iso-e global region" }, "eu-isoe-west-1" : { "description" : "EU ISOE West" @@ -222,7 +216,7 @@ "id" : "aws-iso-f", "outputs" : { "dnsSuffix" : "csp.hci.ic.gov", - "dualStackDnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "api.aws.hci.ic.gov", "implicitGlobalRegion" : "us-isof-south-1", "name" : "aws-iso-f", "supportsDualStack" : false, @@ -231,7 +225,7 @@ "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", "regions" : { "aws-iso-f-global" : { - "description" : "AWS ISOF global region" + "description" : "aws-iso-f global region" }, "us-isof-east-1" : { "description" : "US ISOF EAST" @@ -241,19 +235,25 @@ } } }, { - "id" : "aws-eusc", + "id" : "aws-us-gov", "outputs" : { - "dnsSuffix" : "amazonaws.eu", - "dualStackDnsSuffix" : "amazonaws.eu", - "implicitGlobalRegion" : "eusc-de-east-1", - "name" : "aws-eusc", - "supportsDualStack" : false, + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", "regions" : { - "eusc-de-east-1" : { - "description" : "EU (Germany)" + "aws-us-gov-global" : { + "description" : "aws-us-gov global region" + }, + "us-gov-east-1" : { + "description" : "AWS 
GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" } } } ], diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index b7bd93f378b..ec7d54beffd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,7 @@ +# v2.7.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.7.3 (2025-08-11) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index fd1a4e36802..e7b4e1cd18c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.7.3" +const goModuleVersion = "2.7.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 18bff6cea2f..62da8050f3b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.3 (2025-08-11) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 226823eb790..4a0c6ae3c9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.3" +const goModuleVersion = "1.13.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 3c7bac38282..ca18a1e9f2c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,12 @@ +# v1.38.0 (2025-08-21) + +* **Feature**: Remove incorrect endpoint tests +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.1 (2025-08-20) + +* **Bug Fix**: Remove unused deserialization code. + # v1.37.0 (2025-08-11) * **Feature**: Add support for configuring per-service Options via callback on global config. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 59349890f6f..a1ac917ec6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -21,17 +21,8 @@ import ( "io" "strconv" "strings" - "time" ) -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - type awsAwsquery_deserializeOpAssumeRole struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 82e0bf854c3..931a5d81e11 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.37.0" +const goModuleVersion = "1.38.0" diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba0904..ffb24e8e313 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) 
} // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) } return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 19063416577..c592f6ad5fb 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. +// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087baf..58db928450d 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1d2f71824aa..2fdf80fdd30 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 4e91332bb51..de8de0cb6c4 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } + offset := 1 - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + for { + n := runtime.Callers(offset, pcs) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
- if name == "testing.tRunner" { + if n == 0 { break } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + frames := runtime.CallersFrames(pcs[:n]) + + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break } - } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break + } } + + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. +// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. 
@@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) } return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) + return isEmptyValue(reflect.ValueOf(object)) +} +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty switch objValue.Kind() { // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // non-nil pointers are empty if the value they point to is empty case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) + return isEmptyValue(objValue.Elem()) } + return false } -// Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. @@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) 
- case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) + } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) 
+ } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d387..a0b953aa5cf 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. +// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7ced..5a6bb75f2cf 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d7fc..5a74c4f4d5b 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf64c2..0bae80e34a3 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. 
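// (The `// +build` lines being removed here are the legacy, pre-Go-1.17
// build-constraint syntax. For Go 1.17 and newer, the single `//go:build`
// expression that each file keeps, e.g.
//
//	//go:build !testify_yaml_fail && !testify_yaml_custom
//
// is authoritative on its own, so the old form can be dropped without
// changing which files are built.)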
// diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe69a..8041803fd2f 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 96843472455..c8e3f94a803 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index d8921950d7b..2d02f9bcef1 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
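// Because require assertions call t.FailNow() on failure, Error must be
// invoked from the goroutine that runs the test function itself, never from
// goroutines the test spawns (see the note added to doc.go in this change).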
// -// actualObj, err := SomeFunction() -// if require.Error(t, err) { -// require.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Errorf(t, err, "error message %s", "formatted") { -// require.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. +// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. +// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if require.NotEmpty(t, obj) { // require.Equal(t, "two", obj[1]) @@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if require.NotEmptyf(t, obj, "error message %s", "formatted") { // require.Equal(t, "two", obj[1]) @@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 1bd87304f43..e6f7e944684 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/modules.txt b/vendor/modules.txt index 28393a8be09..33c70768688 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -118,7 +118,7 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.38.0 +# github.com/aws/aws-sdk-go-v2 v1.38.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -141,19 +141,19 @@ github.com/aws/aws-sdk-go-v2/internal/sdk github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 +# github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints @@ -478,7 +478,7 @@ github.com/sirupsen/logrus # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.10.0 +# github.com/stretchr/testify v1.11.0 ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml From 79c4e5b587d6a3f4e58e394422796bc4ff4d63fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 11:18:16 +0000 Subject: [PATCH 043/157] rebase: bump google.golang.org/protobuf from 1.36.7 to 1.36.8 Bumps google.golang.org/protobuf from 1.36.7 to 1.36.8. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-version: 1.36.8 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- .../protobuf/internal/genid/api_gen.go | 6 ++++++ .../protobuf/internal/version/version.go | 2 +- .../protobuf/types/descriptorpb/descriptor.pb.go | 10 +++++++--- vendor/modules.txt | 4 ++-- 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 76f2c2eb828..e0c75635a80 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( golang.org/x/net v0.43.0 golang.org/x/sys v0.35.0 google.golang.org/grpc v1.75.0 - google.golang.org/protobuf v1.36.7 + google.golang.org/protobuf v1.36.8 k8s.io/api v0.33.4 k8s.io/apimachinery v0.33.4 k8s.io/cloud-provider v0.33.4 diff --git a/go.sum b/go.sum index 80b91b3de92..37428f0c1ef 100644 --- a/go.sum +++ b/go.sum @@ -1318,8 +1318,8 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f9185013..3ceb6fa7f5e 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. 
@@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a53364c5992..697d1c14f3c 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 7 + Patch = 8 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 6843b0beafa..4eacb523c33 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -2873,7 +2873,10 @@ type FieldOptions struct { // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // DEPRECATED. DO NOT USE! // For Google-internal migration only. Do not use. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. @@ -2977,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool { return Default_FieldOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
func (x *FieldOptions) GetWeak() bool { if x != nil && x.Weak != nil { return *x.Weak } @@ -4843,7 +4847,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + - "\"\x9d\r\n" + + "\"\xa1\r\n" + "\fFieldOptions\x12A\n" + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + @@ -4852,9 +4856,9 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + "\n" + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + - "deprecated\x12\x19\n" + + "deprecated\x12\x1d\n" + "\x04weak\x18\n" + - " \x01(\b:\x05falseR\x04weak\x12(\n" + + " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + diff --git a/vendor/modules.txt b/vendor/modules.txt index 33c70768688..04f5ad449c0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -655,8 +655,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.7 -## explicit; go 1.22 +# google.golang.org/protobuf v1.36.8 +## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext From 075782e3ca5023b74bcbf4afde742d823c2f8e94 Mon Sep 17 00:00:00 2001 From: Adi Aloni Date: Mon, 1 Sep 2025 14:03:21 +0300 Subject: [PATCH 044/157] util: allow ParseClientIP to handle compressed ipv6 addresses Previously ParseClientIP would use a regex to match addresses, which was too strict and could only correctly match complete IPv4 and IPv6 addresses. This commit changes the ParseClientIP function to use Go's net package to correctly parse all valid IP forms. Signed-off-by: Adi Aloni --- .../csi-addons/networkfence/fencing_test.go | 2 +- internal/util/util.go | 30 ++++++++----------- internal/util/util_test.go | 8 ++++- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/internal/csi-addons/networkfence/fencing_test.go b/internal/csi-addons/networkfence/fencing_test.go index 05d6e364c74..3c2adb2f385 100644 --- a/internal/csi-addons/networkfence/fencing_test.go +++ b/internal/csi-addons/networkfence/fencing_test.go @@ -68,7 +68,7 @@ func TestFetchIP(t *testing.T) { expectedErr: false, }, { - clientInfo: "client.4305 2001:0db8:85a3:0000:0000:8a2e:0370:7334:0/422650892", + clientInfo: "client.4305 [2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0/422650892", expectedIP: "2001:db8:85a3::8a2e:370:7334", expectedErr: false, }, diff --git a/internal/util/util.go b/internal/util/util.go index 757a7d69df1..b7347de8a5a 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -302,24 +302,18 @@ func GetVolumeContext(parameters map[string]string) map[string]string { // Handles both IPv4 and IPv6 addresses.
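// A rough sketch of the net-package behavior the new implementation below
// relies on (the addresses are illustrative only):
//
//	host, _, err := net.SplitHostPort("[fd98::4]:0") // host == "fd98::4", err == nil
//	ip := net.ParseIP(host)                          // ip.String() == "fd98::4"
//	_, _, err = net.SplitHostPort("10.244.0.1")      // fails ("missing port"), so the
//	                                                 // raw value is handed to ParseIP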
// Example: "10.244.0.1:0/2686266785" returns "10.244.0.1". func ParseClientIP(addr string) (string, error) { - // Attempt to extract the IP address using a regular expression - // the regular expression aims to match either a complete IPv6 - // address or a complete IPv4 address follows by any prefix (v1 or v2) - // if exists - // (?:v[0-9]+:): this allows for an optional prefix starting with "v" - // followed by one or more digits and a colon. - // The ? outside the group makes the entire prefix section optional. - // (?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}: this allows to check for - // standard IPv6 address. - // |: Alternation operator to allow matching either the IPv6 pattern - // with a prefix or the IPv4 pattern. - // '(?:\d+\.){3}\d+: This part matches a standard IPv4 address. - re := regexp.MustCompile(`(?:v[0-9]+:)?([0-9a-fA-F]{1,4}(:[0-9a-fA-F]{1,4}){7}|(?:\d+\.){3}\d+)`) - ipMatches := re.FindStringSubmatch(addr) - - if len(ipMatches) > 0 { - ip := net.ParseIP(ipMatches[1]) - if ip != nil { + versionPrefixRe := regexp.MustCompile(`(?:v\d+:)`) + for hostPair := range strings.SplitSeq(addr, " ") { + hostPair = versionPrefixRe.ReplaceAllString(hostPair, "") + + host, _, err := net.SplitHostPort(hostPair) + if err != nil { + // We might fail if there is no port specified, in that case continue + // as if it's just an IP address and try to parse it. + host = hostPair + } + + if ip := net.ParseIP(host); ip != nil { return ip.String(), nil } } diff --git a/internal/util/util_test.go b/internal/util/util_test.go index 3010fa6424d..b24c7e77815 100644 --- a/internal/util/util_test.go +++ b/internal/util/util_test.go @@ -300,10 +300,16 @@ func TestParseClientIP(t *testing.T) { }, { name: "IPv6 address", - addr: "2001:0db8:85a3:0000:0000:8a2e:0370:7334:0/2686266785", + addr: "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0/2686266785", want: "2001:db8:85a3::8a2e:370:7334", wantErr: false, }, + { + name: "Compressed IPv6 address", + addr: "[fd98::4]:0/2686266785", + want: "fd98::4", + wantErr: false, + }, { name: "Invalid address", addr: "invalid", From 55f6140298ffe9595ddf7690473262dcd1bc3473 Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Tue, 2 Sep 2025 09:03:33 +0200 Subject: [PATCH 045/157] ci: let Mergify label NVMe-oF PRs Anything that contains NVMe-oF or nvmeof in the PR title can be labelled with component/nvme-of. Signed-off-by: Niels de Vos --- .mergify.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 86081e38b5c..3cb90ed65ea 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -411,6 +411,13 @@ pull_request_rules: label: add: - component/rbd + - name: title contains NVME-oF or nvmeof + conditions: + - "title~=(NVMe-oF)|(nvmeof)" + actions: + label: + add: + - component/nvme-of - name: title contains CI, testing or e2e conditions: - "title~=(ci: )|(testing: )|(e2e)" From ed2e87171d5901872548aeb7d630e57a953c8fcb Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Wed, 3 Sep 2025 16:52:28 +0200 Subject: [PATCH 046/157] nvmeof: add unit-testing for helper functions These functions do not communicate with the Ceph cluster or NVMe-oF gateways and are good candidates for unit-testing. 
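For example (an illustrative snippet, not part of the change itself), a pure
helper such as GatewayAddress.String() can be exercised with nothing more
than the standard testing package and testify:

    gw := nvmeof.GatewayAddress{Address: "127.0.0.1", Port: 5500}
    require.Equal(t, "127.0.0.1:5500", gw.String())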
Signed-off-by: Niels de Vos --- .../controller/controllerserver_test.go | 148 ++++++++++++++++++ internal/nvmeof/nvmeof_test.go | 59 +++++++ 2 files changed, 207 insertions(+) create mode 100644 internal/nvmeof/controller/controllerserver_test.go create mode 100644 internal/nvmeof/nvmeof_test.go diff --git a/internal/nvmeof/controller/controllerserver_test.go b/internal/nvmeof/controller/controllerserver_test.go new file mode 100644 index 00000000000..aa7147edf02 --- /dev/null +++ b/internal/nvmeof/controller/controllerserver_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2025 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "strconv" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/stretchr/testify/require" + + "github.com/ceph/ceph-csi/internal/nvmeof" +) + +func TestExtractHostNQNFromNodeID(t *testing.T) { + t.Parallel() + + //nolint:lll // these tests just contain long strings + tests := []struct { + nodeID string + hostNQN string + shouldFail bool + }{ + { + nodeID: "ip-10-0-79-198.us-east-2.compute.internal::nqn.2025-08.io.ceph:ip-10-0-79-198.us-east-2.compute.internal", + hostNQN: "nqn.2025-08.io.ceph:ip-10-0-79-198.us-east-2.compute.internal", + }, + { + nodeID: "ip-10-0-79-198.us-east-2.compute.internal::nqn.2014-08.org.nvmexpress:uuid:8d018d47-1a5b-40cc-b26c-4ec9351d6723", + hostNQN: "nqn.2014-08.org.nvmexpress:uuid:8d018d47-1a5b-40cc-b26c-4ec9351d6723", + }, + { + nodeID: "ip-10-0-79-198.us-east-2.compute.internal", + shouldFail: true, + }, + } + + for _, test := range tests { + hostNQN, err := extractHostNQNFromNodeID(test.nodeID) + if test.shouldFail { + require.Error(t, err) + + continue + } + + require.Equal(t, test.hostNQN, hostNQN) + } +} + +func TestToRBDMetadataKey(t *testing.T) { + t.Parallel() + + mdKey := toRBDMetadataKey("SubsystemNQN") + require.Equal(t, ".rbd.nvmeof.SubsystemNQN", mdKey) +} + +func TestPopulateVolumeContext(t *testing.T) { + t.Parallel() + + volume := &csi.Volume{} + config := &nvmeof.NVMeoFVolumeData{ + SubsystemNQN: "nqn.2014-08.org.nvmexpress:uuid:e61ecd13-2727-42a3-947e-2127d63abacc", + NamespaceID: 42, + NamespaceUUID: "c1a0223f-a9ba-4f10-89c2-7496ad50e026", + ListenerInfo: nvmeof.ListenerDetails{ + GatewayAddress: nvmeof.GatewayAddress{ + Address: "127.0.0.1", + Port: 4420, + }, + Hostname: "localhost", + }, + GatewayManagementInfo: nvmeof.GatewayConfig{ + Address: "127.0.0.2", + Port: 5500, + }, + } + + populateVolumeContext(volume, config) + + require.Equal(t, config.SubsystemNQN, volume.GetVolumeContext()["SubsystemNQN"]) + require.Equal(t, strconv.FormatUint(uint64(config.NamespaceID), 10), volume.GetVolumeContext()["NamespaceID"]) + require.Equal(t, config.NamespaceUUID, volume.GetVolumeContext()["NamespaceUUID"]) + require.Equal(t, config.ListenerInfo.Address, volume.GetVolumeContext()["ListenerAddress"]) + require.Equal(t, strconv.FormatUint(uint64(config.ListenerInfo.Port), 10), volume.GetVolumeContext()["ListenerPort"]) + require.Equal(t,
config.ListenerInfo.Hostname, volume.GetVolumeContext()["ListenerHostname"]) + require.Equal(t, config.GatewayManagementInfo.Address, volume.GetVolumeContext()["GatewayAddress"]) + require.Equal(t, + strconv.FormatUint(uint64(config.GatewayManagementInfo.Port), 10), + volume.GetVolumeContext()["GatewayPort"]) +} + +func TestGetGatewayConfigFromRequest(t *testing.T) { + t.Parallel() + + tests := []struct { + params map[string]string + shouldFail bool + }{ + { + params: nil, + shouldFail: true, + }, + { + params: map[string]string{ + "nvmeofGatewayAddress": "127.0.0.1", + }, + shouldFail: true, + }, + { + params: map[string]string{ + "nvmeofGatewayPort": "5500", + }, + shouldFail: true, + }, + { + params: map[string]string{ + "nvmeofGatewayAddress": "127.0.0.1", + "nvmeofGatewayPort": "5500", + }, + }, + } + + for _, test := range tests { + config, err := getGatewayConfigFromRequest(test.params) + if test.shouldFail { + require.Error(t, err) + + continue + } + + require.NoError(t, err) + require.NotNil(t, config) + } +} diff --git a/internal/nvmeof/nvmeof_test.go b/internal/nvmeof/nvmeof_test.go new file mode 100644 index 00000000000..60a190c7196 --- /dev/null +++ b/internal/nvmeof/nvmeof_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2025 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nvmeof + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGatewayAddress_String(t *testing.T) { + t.Parallel() + tests := []struct { + address string + port uint32 + result string + }{ + {"127.0.0.1", 5500, "127.0.0.1:5500"}, + {"localhost", 0o055 /* octal: 45 */, "localhost:45"}, + {"localhost.localdomain", 0, "localhost.localdomain:0"}, + } + + for _, test := range tests { + gw := GatewayAddress{ + Address: test.address, + Port: test.port, + } + + require.Equal(t, test.result, gw.String()) + } +} + +func TestGatewayRpcClient_generateSerialNumber(t *testing.T) { + t.Parallel() + + client := &GatewayRpcClient{ + // random serial should always result in 0 + maxSerial: big.NewInt(1), + } + + serial, err := client.generateSerialNumber() + require.NoError(t, err) + require.Equal(t, "Ceph2", serial) +} From cc6a144c28e8ca3eed4ea0d9e133fbdb42897683 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 15:49:31 +0000 Subject: [PATCH 047/157] rebase: bump the github-dependencies group across 1 directory with 2 updates Bumps the github-dependencies group with 2 updates in the /e2e directory: [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) and [github.com/onsi/gomega](https://github.com/onsi/gomega).
Updates `github.com/onsi/ginkgo/v2` from 2.23.4 to 2.25.1 - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.23.4...v2.25.1) Updates `github.com/onsi/gomega` from 1.38.0 to 1.38.1 - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.38.0...v1.38.1) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-version: 2.25.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: github-dependencies - dependency-name: github.com/onsi/gomega dependency-version: 1.38.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: github-dependencies ... Signed-off-by: dependabot[bot] --- e2e/go.mod | 28 +- e2e/go.sum | 54 +- .../Masterminds/semver/v3/.gitignore | 1 + .../Masterminds/semver/v3/.golangci.yml | 27 + .../Masterminds/semver/v3/CHANGELOG.md | 268 + .../Masterminds/semver/v3/LICENSE.txt | 19 + .../github.com/Masterminds/semver/v3/Makefile | 31 + .../Masterminds/semver/v3/README.md | 274 + .../Masterminds/semver/v3/SECURITY.md | 19 + .../Masterminds/semver/v3/collection.go | 24 + .../Masterminds/semver/v3/constraints.go | 601 + .../github.com/Masterminds/semver/v3/doc.go | 184 + .../Masterminds/semver/v3/version.go | 788 + .../github.com/onsi/ginkgo/v2/CHANGELOG.md | 54 + .../github.com/onsi/ginkgo/v2/core_dsl.go | 32 +- .../onsi/ginkgo/v2/decorator_dsl.go | 43 + .../ginkgo/v2/ginkgo/build/build_command.go | 1 - .../ginkgo/v2/ginkgo/watch/dependencies.go | 15 +- .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 3 + .../onsi/ginkgo/v2/internal/around_node.go | 34 + .../onsi/ginkgo/v2/internal/focus.go | 9 +- .../onsi/ginkgo/v2/internal/group.go | 28 +- .../onsi/ginkgo/v2/internal/node.go | 83 +- .../ginkgo/v2/internal/progress_report.go | 2 +- .../onsi/ginkgo/v2/internal/spec_context.go | 27 + .../onsi/ginkgo/v2/internal/suite.go | 39 +- .../internal/testingtproxy/testing_t_proxy.go | 6 + .../ginkgo/v2/reporters/default_reporter.go | 31 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 9 + .../ginkgo/v2/reporters/teamcity_report.go | 8 + .../onsi/ginkgo/v2/types/around_node.go | 56 + .../github.com/onsi/ginkgo/v2/types/config.go | 16 +- .../github.com/onsi/ginkgo/v2/types/errors.go | 18 + .../onsi/ginkgo/v2/types/semver_filter.go | 60 + .../github.com/onsi/ginkgo/v2/types/types.go | 139 +- .../onsi/ginkgo/v2/types/version.go | 2 +- .../github.com/onsi/gomega/CHANGELOG.md | 10 + .../github.com/onsi/gomega/gomega_dsl.go | 2 +- .../onsi/gomega/internal/async_assertion.go | 2 +- .../matchers/be_comparable_to_matcher.go | 3 +- .../gomega/matchers/match_yaml_matcher.go | 2 +- .../go.opentelemetry.io/otel/.clomonitor.yml | 3 + .../go.opentelemetry.io/otel/.golangci.yml | 6 +- .../go.opentelemetry.io/otel/CHANGELOG.md | 60 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 17 +- e2e/vendor/go.opentelemetry.io/otel/Makefile | 2 +- e2e/vendor/go.opentelemetry.io/otel/README.md | 1 + .../go.opentelemetry.io/otel/RELEASING.md | 23 + .../otel/dependencies.Dockerfile | 4 +- .../otel/sdk/resource/builtin.go | 2 +- .../otel/sdk/resource/container.go | 2 +- .../otel/sdk/resource/env.go | 2 +- .../otel/sdk/resource/host_id.go | 2 +- .../otel/sdk/resource/os.go | 2 +- .../otel/sdk/resource/process.go | 2 +- .../otel/sdk/trace/batch_span_processor.go | 3 +- 
.../otel/sdk/trace/span.go | 2 +- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../otel/semconv/v1.34.0/MIGRATION.md | 4 + .../otel/semconv/v1.34.0/README.md | 3 + .../otel/semconv/v1.34.0/attribute_group.go | 13851 ++++++++++++++++ .../otel/semconv/v1.34.0/doc.go | 9 + .../otel/semconv/v1.34.0/exception.go | 9 + .../otel/semconv/v1.34.0/schema.go | 9 + .../go.opentelemetry.io/otel/trace/auto.go | 2 +- .../go.opentelemetry.io/otel/version.go | 2 +- .../go.opentelemetry.io/otel/versions.yaml | 11 +- e2e/vendor/go.yaml.in/yaml/v3/LICENSE | 50 + e2e/vendor/go.yaml.in/yaml/v3/NOTICE | 13 + e2e/vendor/go.yaml.in/yaml/v3/README.md | 171 + e2e/vendor/go.yaml.in/yaml/v3/apic.go | 747 + e2e/vendor/go.yaml.in/yaml/v3/decode.go | 1018 ++ e2e/vendor/go.yaml.in/yaml/v3/emitterc.go | 2054 +++ e2e/vendor/go.yaml.in/yaml/v3/encode.go | 577 + e2e/vendor/go.yaml.in/yaml/v3/parserc.go | 1274 ++ e2e/vendor/go.yaml.in/yaml/v3/readerc.go | 434 + e2e/vendor/go.yaml.in/yaml/v3/resolve.go | 326 + e2e/vendor/go.yaml.in/yaml/v3/scannerc.go | 3040 ++++ e2e/vendor/go.yaml.in/yaml/v3/sorter.go | 134 + e2e/vendor/go.yaml.in/yaml/v3/writerc.go | 48 + e2e/vendor/go.yaml.in/yaml/v3/yaml.go | 703 + e2e/vendor/go.yaml.in/yaml/v3/yamlh.go | 811 + e2e/vendor/go.yaml.in/yaml/v3/yamlprivateh.go | 198 + .../x/tools/go/ast/inspector/inspector.go | 1 + .../x/tools/go/ast/inspector/typeof.go | 2 - .../google.golang.org/grpc/MAINTAINERS.md | 8 +- .../endpointsharding/endpointsharding.go | 21 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 6 +- .../google.golang.org/grpc/clientconn.go | 11 +- .../grpc/credentials/credentials.go | 25 +- .../google.golang.org/grpc/credentials/tls.go | 30 +- .../google.golang.org/grpc/dialoptions.go | 2 + .../grpc/internal/envconfig/envconfig.go | 16 +- .../grpc/internal/internal.go | 29 - .../internal/resolver/dns/dns_resolver.go | 20 +- .../google.golang.org/grpc/picker_wrapper.go | 36 +- .../grpc/resolver/resolver.go | 5 + e2e/vendor/google.golang.org/grpc/server.go | 1 + .../google.golang.org/grpc/stats/stats.go | 20 +- e2e/vendor/google.golang.org/grpc/stream.go | 32 +- e2e/vendor/google.golang.org/grpc/version.go | 2 +- e2e/vendor/modules.txt | 33 +- 102 files changed, 28696 insertions(+), 289 deletions(-) create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/.gitignore create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/.golangci.yml create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/LICENSE.txt create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/Makefile create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/README.md create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/SECURITY.md create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/collection.go create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/constraints.go create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/doc.go create mode 100644 e2e/vendor/github.com/Masterminds/semver/v3/version.go create mode 100644 e2e/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go create mode 100644 e2e/vendor/github.com/onsi/ginkgo/v2/types/around_node.go create mode 100644 e2e/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go create mode 100644 e2e/vendor/go.opentelemetry.io/otel/.clomonitor.yml create mode 100644 e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md create mode 100644 e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md create mode 
100644 e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go create mode 100644 e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go create mode 100644 e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go create mode 100644 e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/LICENSE create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/NOTICE create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/README.md create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/apic.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/decode.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/emitterc.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/encode.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/parserc.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/readerc.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/resolve.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/scannerc.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/sorter.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/writerc.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/yaml.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/yamlh.go create mode 100644 e2e/vendor/go.yaml.in/yaml/v3/yamlprivateh.go diff --git a/e2e/go.mod b/e2e/go.mod index 7e1eb4359f9..89cf114fa2a 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -13,8 +13,8 @@ require ( github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/google/uuid v1.6.0 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 - github.com/onsi/ginkgo/v2 v2.23.4 - github.com/onsi/gomega v1.38.0 + github.com/onsi/ginkgo/v2 v2.25.2 + github.com/onsi/gomega v1.38.2 // when updating k8s.io modules, update the 'replace' section below too k8s.io/api v0.33.4 k8s.io/apimachinery v0.33.4 @@ -39,6 +39,7 @@ replace ( require ( cel.dev/expr v0.24.0 // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -108,14 +109,15 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/sdk v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.41.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.43.0 // indirect @@ -125,29 +127,29 @@ require ( golang.org/x/term v0.34.0 // indirect golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.35.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect - 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect - google.golang.org/grpc v1.74.2 // indirect + golang.org/x/tools v0.36.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/grpc v1.75.0 // indirect google.golang.org/protobuf v1.36.7 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.33.1 // indirect + k8s.io/apiextensions-apiserver v0.33.4 // indirect k8s.io/apiserver v0.33.4 // indirect k8s.io/component-base v0.33.4 // indirect k8s.io/component-helpers v0.33.4 // indirect k8s.io/controller-manager v0.33.4 // indirect k8s.io/cri-api v0.33.2 // indirect k8s.io/cri-client v0.0.0 // indirect - k8s.io/csi-translation-lib v0.33.1 // indirect + k8s.io/csi-translation-lib v0.33.4 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect k8s.io/kube-scheduler v0.0.0 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.33.2 // indirect - k8s.io/mount-utils v0.33.3 // indirect + k8s.io/mount-utils v0.33.4 // indirect k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/e2e/go.sum b/e2e/go.sum index 522b5c61ec5..14aa8af840c 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -2,6 +2,8 @@ cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= @@ -136,10 +138,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= -github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/gomega v1.38.2 
h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/cgroups v0.0.1 h1:MXjMkkFpKv6kpuirUa4USFBas573sSAY082B4CiHEVA= github.com/opencontainers/cgroups v0.0.1/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -198,20 +200,20 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -222,6 +224,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -262,18 +266,20 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= -golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= -google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= google.golang.org/protobuf 
v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -324,8 +330,8 @@ k8s.io/kubelet v0.33.2 h1:wxEau5/563oJb3j3KfrCKlNWWx35YlSgDLOYUBCQ0pg= k8s.io/kubelet v0.33.2/go.mod h1:way8VCDTUMiX1HTOvJv7M3xS/xNysJI6qh7TOqMe5KM= k8s.io/kubernetes v1.33.4 h1:T1d5FLUYm3/KyUeV7YJhKTR980zHCHb7K2xhCSo3lE8= k8s.io/kubernetes v1.33.4/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00= -k8s.io/mount-utils v0.33.3 h1:Q1jsnqdS4LdtJSYSXgiQv/XNrRHQncLk3gMYjKNSZrE= -k8s.io/mount-utils v0.33.3/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= +k8s.io/mount-utils v0.33.4 h1:o83Qx0AgY5JDYhFV6gAYfSy+GAPIuPqSKdEqac5lxqo= +k8s.io/mount-utils v0.33.4/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= k8s.io/pod-security-admission v0.33.4 h1:adSwY7a/Q4Eoj+uCUfav90xRe6mB8waF0HAZ4gZeWD0= k8s.io/pod-security-admission v0.33.4/go.mod h1:K+4JaqBz5yqE7TXf3g5zEiBLcu9RdW2BjTWydFEror4= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/.gitignore b/e2e/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 00000000000..6b061e6174b --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/e2e/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 00000000000..fbc6332592f --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,27 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - govet + - staticcheck + - errcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/e2e/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 00000000000..fabe5e43dc8 --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,268 @@ +# Changelog + +## 3.4.0 (2025-06-27) + +### Added + +- #268: Added property to Constraints to include prereleases for Check and Validate + +### Changed + +- #263: Updated Go testing for 1.24, 1.23, and 1.22 +- #269: Updated the error message handling for message case and wrapping errors +- #266: Restore the ability to have leading 0's when parsing with NewVersion. + Opt-out of this by setting CoerceNewVersion to false. + +### Fixed + +- #257: Fixed the CodeQL link (thanks @dmitris) +- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out + of this by setting DetailedNewVersionErrors to false for faster performance. 
+- #267: Handle pre-releases for an "and" group if one constraint includes them + +## 3.3.1 (2024-11-19) + +### Fixed + +- #253: Fix for allowing some version that were invalid + +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily +- #203: Docs updated for security details + +### Fixed + +- #199: Fixed issue with range transformations + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. 
For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. 
For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/e2e/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 00000000000..9ff7da9c48b --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/Makefile b/e2e/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 00000000000..9ca87a2c79e --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,31 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go env GOCACHE + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . + +$(GOLANGCI_LINT): + # Install golangci-lint. 
The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/README.md b/e2e/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 00000000000..2f56c676a5d --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,274 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +## Package Versions + +Note, import `github.com/Masterminds/semver/v3` to use the latest version. + +There are three major versions of the `semver` package. + +* 3.x.x is the stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +There are package level variables that affect how `NewVersion` handles parsing. + +- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant + versions into SemVer.
For example, allowing a leading 0 in a major, minor, or patch + part. This enables the use of CalVer in versions even when not compliant with SemVer. + When set to `false` less coercion work is done. +- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when + `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true` + it can provide some more insight into why a version is invalid. Setting + `DetailedNewVersionErrors` to `false` is faster on performance but provides less + detailed error messages if a version fails to parse. + +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. There are some important +differences to note between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include pre-releases + within the comparison. It will provide an answer that is valid with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering pre-releases to be invalid if the + range does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods of checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patterns with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The variable a will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3.
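A minimal sketch of how such a compound constraint evaluates, assuming the `github.com/Masterminds/semver/v3` import path noted above (the version literals are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// ">= 1.2 < 3.0.0" is one AND group; "|| >= 4.2.3" adds an OR alternative.
	c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
	if err != nil {
		panic(err)
	}

	for _, s := range []string{"1.2.5", "3.1.0", "4.2.3"} {
		v, err := semver.NewVersion(s)
		if err != nil {
			panic(err)
		}
		// 1.2.5 satisfies the first AND group, 4.2.3 the OR alternative,
		// and 3.1.0 satisfies neither.
		fmt.Println(s, c.Check(v)) // true, false, true
	}
}
```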
+ +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, pre-releases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification, pre-releases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer's comparisons using constraints without a pre-release comparator will skip +pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)). + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +The `Constraints` instance returned from `semver.NewConstraint()` has a property +`IncludePrerelease` that, when set to true, will return prerelease versions when calls +to `Check()` and `Validate()` are made. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphen ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's +parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`. + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts +as the API stability level.
This is useful when comparing API versions, as a +major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://codeql.github.com) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/e2e/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 00000000000..a30a66b1f74 --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHub's +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/collection.go b/e2e/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 00000000000..a78235895fd --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/constraints.go b/e2e/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 00000000000..8b7a10f8369 --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,601 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint + containsPre []bool + + // IncludePrerelease specifies if pre-releases should be included in + // the results. Note, if a constraint range has a prerelease than + // prereleases will be included for that AND group even if this is + // set to false. + IncludePrerelease bool +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + lenors := len(ors) + or := make([][]*constraint, lenors) + hasPre := make([]bool, lenors) + for k, v := range ors { + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + // If one of the constraints has a prerelease record this. + // This information is used when checking all in an "and" + // group to ensure they all check for prereleases. + if pc.con.pre != "" { + hasPre[k] = true + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{ + constraints: or, + containsPre: hasPre, + } + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for i, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for i, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. 
+ if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version, includePre bool) (bool, error) { + return constraintOps[c.origfunc](v, c, includePre) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint, includePre bool) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint parser error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint parser error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) { + + // The existence of prereleases is checked at the group level and passed in. 
+ // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c, includePre) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. 
Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/doc.go b/e2e/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 00000000000..74f97caa57f --- /dev/null +++ b/e2e/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + - Parse semantic versions + - Sort semantic versions + - Check if a semantic version fits within a set of constraints + - Optionally work with a `v` prefix + +# Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an optional error can be returned if there is an issue +parsing the version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+b345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. For more details please see the documentation +at https://godoc.org/github.com/Masterminds/semver. + +# Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. 
+For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +# Checking Version Constraints and Comparing Versions + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses Constraints. There are some important +differences to note between these two methods of comparison. + + 1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer valid with the comparison + spec section at https://semver.org/#spec-item-11 + 2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + range does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. + 3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods of checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patterns with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parsable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parsable. + } + // Check if the version meets the constraints. The variable a will be true. + a := c.Check(v) + +# Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma or space separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. This can also be written as +`">= 1.2, < 3.0.0 || >= 4.2.3"` + +The basic comparisons are: + + - `=`: equal (aliased to no operator) + - `!=`: not equal + - `>`: greater than + - `<`: less than + - `>=`: greater than or equal to + - `<=`: less than or equal to + +# Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphen ranges. +These look like: + + - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +# Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the tilde operation.
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as wildcards. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3, < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+  - `~1.x` is equivalent to `>= 1, < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>= 0.2.3, < 0.3.0`
+  - `^0.2` is equivalent to `>= 0.2.0, < 0.3.0`
+  - `^0.0.3` is equivalent to `>= 0.0.3, < 0.0.4`
+  - `^0.0` is equivalent to `>= 0.0.0, < 0.1.0`
+  - `^0` is equivalent to `>= 0.0.0, < 1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+		// Handle constraint not being parseable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parseable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+		fmt.Println(m)
+
+		// Loops over the errors which would read
+		// "1.3 is greater than 1.2.3"
+		// "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/e2e/vendor/github.com/Masterminds/semver/v3/version.go b/e2e/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 00000000000..7a3ba738876
--- /dev/null
+++ b/e2e/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,788 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var looseVersionRegex *regexp.Regexp
+
+// CoerceNewVersion sets whether leading 0's are allowed in the version parts. Leading 0's
+// are not allowed in a valid semantic version. When set to true, NewVersion will coerce
+// leading 0's into a valid version.
+var CoerceNewVersion = true
+
+// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
+// function. This is used when CoerceNewVersion is set to false. If DetailedNewVersionErrors
+// is set to false, ErrInvalidSemVer is returned for any invalid version. This does not
+// apply to StrictNewVersion. Setting it to false also returns errors more quickly.
+var DetailedNewVersionErrors = true
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// during parsing.
+	ErrInvalidSemVer = errors.New("invalid semantic version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
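+	// StrictNewVersion(""), for example, returns this error.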
+ ErrEmptyString = errors.New("version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("invalid metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("invalid prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +// This is not the official regex from the semver spec. It has been modified to allow for loose handling +// where versions like 2.1 are detected. +const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` + +// looseSemVerRegex is a regular expression that lets invalid semver expressions through +// with enough detail that certain errors can be checked for. +const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") + looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. 
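+	// For example, "1.02.3" is rejected with ErrSegmentStartsZero, while
+	// "1.0.3" parses cleanly.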
+ for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract major, minor, and patch + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + if CoerceNewVersion { + return coerceNewVersion(v) + } + m := versionRegex.FindStringSubmatch(v) + if m == nil { + + // Disabling detailed errors is first so that it is in the fast path. + if !DetailedNewVersionErrors { + return nil, ErrInvalidSemVer + } + + // Check for specific errors with the semver string and return a more detailed + // error. + m = looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + err := validateVersion(m) + if err != nil { + return nil, err + } + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +func coerceNewVersion(v string) (*Version, error) { + m := looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. 
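+	// The same prerelease and metadata validation as in NewVersion applies here.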
+ + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. 
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = 0
+	vNext.major = v.major + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+	vNext := v
+	if len(prerelease) > 0 {
+		if err := validatePrerelease(prerelease); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.pre = prerelease
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+	vNext := v
+	if len(metadata) > 0 {
+		if err := validateMetadata(metadata); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.metadata = metadata
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanEqual tests if one version is less than or equal to another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanEqual tests if one version is greater than or equal to another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+	if v == o {
+		return true
+	}
+	if v == nil || o == nil {
+		return false
+	}
+	return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version is smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease. Compare always takes into account
+// prereleases. If you want to work with ranges using typical range syntaxes that
+// skip prereleases if the range is not looking for them, use constraints.
+func (v *Version) Compare(o *Version) int {
+	// Compare the major, minor, and patch version for differences. If a
+	// difference is found return the comparison.
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
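+// The JSON value must be a string such as "1.2.3"; it is parsed with NewVersion,
+// so the same coercion rules apply.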
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+	temp, err := NewVersion(string(text))
+	if err != nil {
+		return err
+	}
+
+	*v = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+	var s string
+	s, _ = value.(string)
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prerelease versions by their parts. The separator, per the spec,
+	// is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them. According
+	// to the semver spec, numbers are always positive. If there is a - at the
+	// start like -99 this is to be evaluated as an alphanum. Numbers always
+	// have precedence over alphanum. Parsing as Uints because negative numbers
+	// are ignored.
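+	// A non-nil error below means the part is not numeric, so it is compared
+	// as a string instead.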
+ + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if p == "" { + return ErrInvalidPrerelease + } else if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if p == "" { + return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} + +// validateVersion checks for common validation issues but may not catch all errors +func validateVersion(m []string) error { + var err error + var v string + if m[1] != "" { + if len(m[1]) > 1 && m[1][0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[2] != "" { + v = strings.TrimPrefix(m[2], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[3] != "" { + v = strings.TrimPrefix(m[3], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[5] != "" { + if err = validatePrerelease(m[5]); err != nil { + return err + } + } + + if m[8] != "" { + if err = validateMetadata(m[8]); err != nil { + return err + } + } + + return nil +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 69b15d18483..1c4d5329fac 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,57 @@ +## 2.25.2 + +### Fixes +Add github output group for progress report content + +### Maintenance +Bump Gomega + +## 2.25.1 + +### Fixes +- fix(types): ignore nameless nodes on FullText() [10866d3] +- chore: fix some CodeQL warnings [2e42cff] + +## 2.25.0 + +### `AroundNode` + +This release introduces a new decorator to support more complex spec setup usecases. 
+ +`AroundNode` registers a function that runs before each individual node. This is considered a more advanced decorator. + +Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information and some examples. + +Allowed signatures: + +- `AroundNode(func())` - `func` will be called before the node is run. +- `AroundNode(func(ctx context.Context) context.Context)` - `func` can wrap the passed in context and return a new one which will be passed on to the node. +- `AroundNode(func(ctx context.Context, body func(ctx context.Context)))` - `ctx` is the context for the node and `body` is a function that must be called to run the node. This gives you complete control over what runs before and after the node. + +Multiple `AroundNode` decorators can be applied to a single node and they will run in the order they are applied. + +Unlike setup nodes like `BeforeEach` and `DeferCleanup`, `AroundNode` is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call `runtime.LockOSThread()` in the `AroundNode` to ensure that the node runs on a single thread). + +Since `AroundNode` allows you to modify the context you can also use `AroundNode` to implement shared setup that attaches values to the context. + +If applied to a container, `AroundNode` will run before every node in the container. Including setup nodes like `BeforeEach` and `DeferCleanup`. + +`AroundNode` can also be applied to `RunSpecs` to run before every node in the suite. This opens up new mechanisms for instrumenting individual nodes across an entire suite. + +## 2.24.0 + +### Features + +Specs can now be decorated with (e.g.) `SemVerConstraint("2.1.0")` and `ginkgo --sem-ver-filter="2.1.1"` will only run constrained specs that match the requested version. Learn more in the docs [here](https://onsi.github.io/ginkgo/#spec-semantic-version-filtering)! Thanks to @Icarus9913 for the PR. + +### Fixes + +- remove -o from run command [3f5d379]. fixes [#1582](https://github.com/onsi/ginkgo/issues/1582) + +### Maintenance + +Numerous dependency bumps and documentation fixes + ## 2.23.4 Prior to this release Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a linux container. Thanks to @emirot for the fix! diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/e2e/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index d027bdff930..ec41e8837c8 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -186,6 +186,20 @@ func GinkgoLabelFilter() string { return suiteConfig.LabelFilter } +/* +GinkgoSemVerFilter() returns the semantic version filter configured for this suite via `--sem-ver-filter`. + +You can use this to manually check if a set of semantic version constraints would satisfy the filter via: + + if (SemVerConstraint("> 2.6.0", "< 2.8.0").MatchesSemVerFilter(GinkgoSemVerFilter())) { + //... + } +*/ +func GinkgoSemVerFilter() string { + suiteConfig, _ := GinkgoConfiguration() + return suiteConfig.SemVerFilter +} + /* PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant when running in parallel and output to stdout/stderr is being intercepted. 
You generally @@ -254,7 +268,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) var reporter reporters.Reporter if suiteConfig.ParallelTotal == 1 { @@ -297,7 +311,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) outputInterceptor.Shutdown() flagSet.ValidateDeprecations(deprecationTracker) @@ -316,8 +330,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { return passed } -func extractSuiteConfiguration(args []any) Labels { +func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) { suiteLabels := Labels{} + suiteSemVerConstraints := SemVerConstraints{} + aroundNodes := types.AroundNodes{} configErrors := []error{} for _, arg := range args { switch arg := arg.(type) { @@ -327,6 +343,10 @@ func extractSuiteConfiguration(args []any) Labels { reporterConfig = arg case Labels: suiteLabels = append(suiteLabels, arg...) + case SemVerConstraints: + suiteSemVerConstraints = append(suiteSemVerConstraints, arg...) + case types.AroundNodeDecorator: + aroundNodes = append(aroundNodes, arg) default: configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) } @@ -342,7 +362,7 @@ func extractSuiteConfiguration(args []any) Labels { os.Exit(1) } - return suiteLabels + return suiteLabels, suiteSemVerConstraints, aroundNodes } func getwd() (string, error) { @@ -365,7 +385,7 @@ func PreviewSpecs(description string, args ...any) Report { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1 defer func() { @@ -383,7 +403,7 @@ func PreviewSpecs(description string, args ...any) Report { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) return global.Suite.GetPreviewReport() } diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/e2e/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index c65af4ce1cf..8bee5acebdb 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ 
-2,6 +2,7 @@ package ginkgo import ( "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" ) /* @@ -99,6 +100,23 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels */ type Labels = internal.Labels +/* +SemVerConstraint decorates specs with SemVerConstraints. Multiple semantic version constraints can be passed to SemVerConstraint and these strings must follow the semantic version constraint rules. +SemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple SemVerConstraints to a given node and a spec's semantic version constraints is the union of all semantic version constraints in its node hierarchy. + +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func SemVerConstraint(semVerConstraints ...string) SemVerConstraints { + return SemVerConstraints(semVerConstraints) +} + +/* +SemVerConstraints are the type for spec SemVerConstraint decorators. Use SemVerConstraint(...) to construct SemVerConstraints. +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +*/ +type SemVerConstraints = internal.SemVerConstraints + /* PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node. @@ -141,3 +159,28 @@ SuppressProgressReporting is a decorator that allows you to disable progress rep if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports. */ const SuppressProgressReporting = internal.SuppressProgressReporting + +/* +AroundNode registers a function that runs before each individual node. This is considered a more advanced decorator. + +Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information. + +Allowed signatures: + +- AroundNode(func()) - func will be called before the node is run. +- AroundNode(func(ctx context.Context) context.Context) - func can wrap the passed in context and return a new one which will be passed on to the node. +- AroundNode(func(ctx context.Context, body func(ctx context.Context))) - ctx is the context for the node and body is a function that must be called to run the node. This gives you complete control over what runs before and after the node. + +Multiple AroundNode decorators can be applied to a single node and they will run in the order they are applied. + +Unlike setup nodes like BeforeEach and DeferCleanup, AroundNode is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call runtime.LockOSThread() in the AroundNode to ensure that the node runs on a single thread). + +Since AroundNode allows you to modify the context you can also use AroundNode to implement shared setup that attaches values to the context. You must return a context that inherits from the passed in context. + +If applied to a container, AroundNode will run before every node in the container. Including setup nodes like BeforeEach and DeferCleanup. + +AroundNode can also be applied to RunSpecs to run before every node in the suite. 
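+
+A hedged sketch (the spec text and body here are hypothetical), pinning a node
+to a single OS thread:
+
+	var _ = It("calls into a thread-bound library", AroundNode(func(ctx context.Context, body func(ctx context.Context)) {
+		runtime.LockOSThread()
+		defer runtime.UnlockOSThread()
+		body(ctx)
+	}), func() {
+		// this code runs on the same goroutine, and therefore the same
+		// locked OS thread, as the AroundNode above
+	})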
+*/ +func AroundNode[F types.AroundNodeAllowedFuncs](f F) types.AroundNodeDecorator { + return types.AroundNode(f, types.NewCodeLocation(1)) +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 2b36b2feb90..3021dfec2eb 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -29,7 +29,6 @@ func BuildBuildCommand() command.Command { var errors []error cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) command.AbortIfErrors("Ginkgo detected configuration issues:", errors) - buildSpecs(args, cliConfig, goFlagsConfig) }, } diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go index a34d94354d9..75cbdb49625 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -2,12 +2,9 @@ package watch import ( "go/build" - "regexp" + "strings" ) -var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) -var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing - type Dependencies struct { deps map[string]int } @@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) { if err != nil { continue } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) { d.addDepIfNotPresent(pkg.Dir, depth) } } @@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) { d.deps[dep] = depth } } + +func matchesGinkgoOrGomega(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega") +} + +func matchesGinkgoIntegration(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 993279de293..cabf2814571 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -2,6 +2,7 @@ package ginkgo import ( "context" + "io" "testing" "github.com/onsi/ginkgo/v2/internal/testingtproxy" @@ -69,6 +70,8 @@ type GinkgoTInterface interface { Skipf(format string, args ...any) Skipped() bool TempDir() string + Attr(key, value string) + Output() io.Writer } /* diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go new file mode 100644 index 00000000000..c9657102059 --- /dev/null +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go @@ -0,0 +1,34 @@ +package internal + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +func ComputeAroundNodes(specs Specs) Specs { + out := Specs{} + for _, spec := range specs { + nodes := Nodes{} + currentNestingLevel := 0 + aroundNodes := types.AroundNodes{} + nestingLevelIndices := []int{} + for _, node := range spec.Nodes { + switch node.NodeType { + case types.NodeTypeContainer: + currentNestingLevel = node.NestingLevel + 1 + nestingLevelIndices = 
append(nestingLevelIndices, len(aroundNodes)) + aroundNodes = aroundNodes.Append(node.AroundNodes...) + nodes = append(nodes, node) + default: + if currentNestingLevel > node.NestingLevel { + currentNestingLevel = node.NestingLevel + aroundNodes = aroundNodes[:nestingLevelIndices[currentNestingLevel]] + } + node.AroundNodes = types.AroundNodes{}.Append(aroundNodes...).Append(node.AroundNodes...) + nodes = append(nodes, node) + } + } + spec.Nodes = nodes + out = append(out, spec) + } + return out +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index e3da7d14dd1..a39daf5a60b 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic *Note:* specs with pending nodes are Skipped when created by NewSpec. */ -func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { +func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") @@ -84,6 +84,13 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit }) } + if suiteConfig.SemVerFilter != "" { + semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints())) + }) + } + if len(suiteConfig.FocusFiles) > 0 { focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles) skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) }) diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/group.go index 02c9fe4fcd4..b88fe2060a5 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -112,19 +112,21 @@ func newGroup(suite *Suite) *group { func (g *group) initialReportForSpec(spec Spec) types.SpecReport { return types.SpecReport{ - ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), - ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), - ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), - LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, - LeafNodeType: types.NodeTypeIt, - LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, - LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), - ParallelProcess: g.suite.config.ParallelProcess, - RunningInParallel: g.suite.isRunningInParallel(), - IsSerial: spec.Nodes.HasNodeMarkedSerial(), - IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), - MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), - MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + ContainerHierarchySemVerConstraints: 
spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints), + ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), + IsSerial: spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), + MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), } } diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 8096950b6c6..647368feacb 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -55,11 +55,13 @@ type Node struct { FlakeAttempts int MustPassRepeatedly int Labels Labels + SemVerConstraints SemVerConstraints PollProgressAfter time.Duration PollProgressInterval time.Duration NodeTimeout time.Duration SpecTimeout time.Duration GracePeriod time.Duration + AroundNodes types.AroundNodes NodeIDWhereCleanupWasGenerated uint } @@ -85,31 +87,46 @@ type FlakeAttempts uint type MustPassRepeatedly uint type Offset uint type Done chan<- any // Deprecated Done Channel for asynchronous testing -type Labels []string type PollProgressInterval time.Duration type PollProgressAfter time.Duration type NodeTimeout time.Duration type SpecTimeout time.Duration type GracePeriod time.Duration +type Labels []string + func (l Labels) MatchesLabelFilter(query string) bool { return types.MustParseLabelFilter(query)(l) } -func UnionOfLabels(labels ...Labels) Labels { - out := Labels{} - seen := map[string]bool{} - for _, labelSet := range labels { - for _, label := range labelSet { - if !seen[label] { - seen[label] = true - out = append(out, label) +type SemVerConstraints []string + +func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool { + return types.MustParseSemVerFilter(version)(svc) +} + +func unionOf[S ~[]E, E comparable](slices ...S) S { + out := S{} + seen := map[E]bool{} + for _, slice := range slices { + for _, item := range slice { + if !seen[item] { + seen[item] = true + out = append(out, item) } } } return out } +func UnionOfLabels(labels ...Labels) Labels { + return unionOf(labels...) +} + +func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerConstraints { + return unionOf(semVerConstraints...) 
+} + func PartitionDecorations(args ...any) ([]any, []any) { decorations := []any{} remainingArgs := []any{} @@ -151,6 +168,8 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(Labels{}): return true + case t == reflect.TypeOf(SemVerConstraints{}): + return true case t == reflect.TypeOf(PollProgressInterval(0)): return true case t == reflect.TypeOf(PollProgressAfter(0)): @@ -161,6 +180,8 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(GracePeriod(0)): return true + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: @@ -191,6 +212,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy NodeType: nodeType, Text: text, Labels: Labels{}, + SemVerConstraints: SemVerConstraints{}, CodeLocation: types.NewCodeLocation(baseOffset), NestingLevel: -1, PollProgressAfter: -1, @@ -221,6 +243,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } labelsSeen := map[string]bool{} + semVerConstraintsSeen := map[string]bool{} trackedFunctionError := false args = remainingArgs remainingArgs = []any{} @@ -299,6 +322,8 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) } + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + node.AroundNodes = append(node.AroundNodes, arg.(types.AroundNodeDecorator)) case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) @@ -311,6 +336,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(err) } } + case t == reflect.TypeOf(SemVerConstraints{}): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SemVerConstraint")) + } + for _, semVerConstraint := range arg.(SemVerConstraints) { + if !semVerConstraintsSeen[semVerConstraint] { + semVerConstraintsSeen[semVerConstraint] = true + semVerConstraint, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation) + node.SemVerConstraints = append(node.SemVerConstraints, semVerConstraint) + appendError(err) + } + } case t.Kind() == reflect.Func: if nodeType.Is(types.NodeTypeContainer) { if node.Body != nil { @@ -824,6 +861,32 @@ func (n Nodes) UnionOfLabels() []string { return out } +func (n Nodes) SemVerConstraints() [][]string { + out := make([][]string, len(n)) + for i := range n { + if n[i].SemVerConstraints == nil { + out[i] = []string{} + } else { + out[i] = []string(n[i].SemVerConstraints) + } + } + return out +} + +func (n Nodes) UnionOfSemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for i := range n { + for _, constraint := range n[i].SemVerConstraints { + if !seen[constraint] { + seen[constraint] = true + out = append(out, constraint) + } + } + } + return out +} + func (n Nodes) CodeLocations() []types.CodeLocation { out := make([]types.CodeLocation, len(n)) for i := range n { @@ -928,7 +991,7 @@ func unrollInterfaceSlice(args any) []any { out := []any{} for i := 0; i < v.Len(); i++ { el := reflect.ValueOf(v.Index(i).Interface()) - if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { + if el.Kind() == 
reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) && el.Type() != reflect.TypeOf(SemVerConstraints{}) { out = append(out, unrollInterfaceSlice(el.Interface())...) } else { out = append(out, v.Index(i).Interface()) diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go index 11269cf1f24..165cbc4b678 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -236,7 +236,7 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { } functionCall.Filename = line[:delimiterIdx] line = strings.Split(line[delimiterIdx+1:], " ")[0] - lineNumber, err := strconv.ParseInt(line, 10, 64) + lineNumber, err := strconv.ParseInt(line, 10, 32) functionCall.Line = int(lineNumber) if err != nil { return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error())) diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2d2ea2fc357..99c9c5f5bee 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -2,6 +2,7 @@ package internal import ( "context" + "reflect" "github.com/onsi/ginkgo/v2/types" ) @@ -11,6 +12,7 @@ type SpecContext interface { SpecReport() types.SpecReport AttachProgressReporter(func() string) func() + WrappedContext() context.Context } type specContext struct { @@ -45,3 +47,28 @@ func NewSpecContext(suite *Suite) *specContext { func (sc *specContext) SpecReport() types.SpecReport { return sc.suite.CurrentSpecReport() } + +func (sc *specContext) WrappedContext() context.Context { + return sc.Context +} + +/* +The user is allowed to wrap `SpecContext` in a new context.Context when using AroundNodes. But body functions expect SpecContext. +We support this by taking their context.Context and returning a SpecContext that wraps it. 
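+
+For example, a user-supplied AroundNode (hypothetical, not part of this file)
+might wrap the context like so:
+
+	AroundNode(func(ctx context.Context) context.Context {
+		return context.WithValue(ctx, userKey, userValue)
+	})
+
+The value chain of the returned context still reaches the original specContext,
+which is what wrapContextChain recovers below.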
+*/ +func wrapContextChain(ctx context.Context) SpecContext { + if ctx == nil { + return nil + } + if reflect.TypeOf(ctx) == reflect.TypeOf(&specContext{}) { + return ctx.(*specContext) + } else if sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(*specContext); ok { + return &specContext{ + Context: ctx, + ProgressReporterManager: sc.ProgressReporterManager, + cancel: sc.cancel, + suite: sc.suite, + } + } + return nil +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 3edf5077650..14a0688f89e 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -32,6 +32,7 @@ type Suite struct { suiteNodes Nodes cleanupNodes Nodes + aroundNodes types.AroundNodes failer *Failer reporter reporters.Reporter @@ -87,6 +88,7 @@ func (suite *Suite) Clone() (*Suite, error) { ProgressReporterManager: NewProgressReporterManager(), topLevelContainers: suite.topLevelContainers.Clone(), suiteNodes: suite.suiteNodes.Clone(), + aroundNodes: suite.aroundNodes.Clone(), selectiveLock: &sync.Mutex{}, }, nil } @@ -104,13 +106,14 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } ApplyNestedFocusPolicyToTree(suite.tree) specs := GenerateSpecsFromTreeRoot(suite.tree) - specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) + specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig) + specs = ComputeAroundNodes(specs) suite.phase = PhaseRun suite.client = client @@ -120,6 +123,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.outputInterceptor = outputInterceptor suite.interruptHandler = interruptHandler suite.config = suiteConfig + suite.aroundNodes = suiteAroundNodes if suite.config.Timeout > 0 { suite.deadline = time.Now().Add(suite.config.Timeout) @@ -127,7 +131,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) - success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs) cancelProgressHandler() @@ -259,6 +263,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + node.AroundNodes = 
types.AroundNodes{}.Append(suite.currentNode.AroundNodes...).Append(node.AroundNodes...) suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) suite.selectiveLock.Unlock() @@ -428,13 +433,14 @@ func (suite *Suite) processCurrentSpecReport() { } } -func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { numSpecsThatWillBeRun := specs.CountWithoutSkip() suite.report = types.Report{ SuitePath: suitePath, SuiteDescription: description, SuiteLabels: suiteLabels, + SuiteSemVerConstraints: suiteSemVerConstraints, SuiteConfig: suite.config, SuiteHasProgrammaticFocus: hasProgrammaticFocus, PreRunStats: types.PreRunStats{ @@ -891,7 +897,30 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ failureC <- failureFromRun }() - node.Body(sc) + aroundNodes := types.AroundNodes{}.Append(suite.aroundNodes...).Append(node.AroundNodes...) + if len(aroundNodes) > 0 { + i := 0 + var f func(context.Context) + f = func(c context.Context) { + sc := wrapContextChain(c) + if sc == nil { + suite.failer.Fail("An AroundNode failed to pass a valid Ginkgo SpecContext in. You must always pass in a context derived from the context passed to you.", aroundNodes[i].CodeLocation) + return + } + i++ + if i < len(aroundNodes) { + aroundNodes[i].Body(sc, f) + } else { + node.Body(sc) + } + } + aroundNodes[0].Body(sc, f) + if i != len(aroundNodes) { + suite.failer.Fail("An AroundNode failed to call the passed in function.", aroundNodes[i].CodeLocation) + } + } else { + node.Body(sc) + } finished = true }() diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index b4ecc7cb83d..9806e315a61 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -229,3 +229,9 @@ func (t *ginkgoTestingTProxy) ParallelTotal() int { func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { return t.attachProgressReporter(f) } +func (t *ginkgoTestingTProxy) Output() io.Writer { + return t.writer +} +func (t *ginkgoTestingTProxy) Attr(key, value string) { + t.addReportEntry(key, value, internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose) +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 74ad0768b74..637232b227b 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { if len(report.SuiteLabels) > 0 { r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) } + if len(report.SuiteSemVerConstraints) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", "))) + } r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) if report.SuiteConfig.ParallelTotal > 1 { r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) @@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { 
bannerWidth = len(labels) + 2 } } + if len(report.SuiteSemVerConstraints) > 0 { + semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints)) + if len(semVerConstraints)+2 > bannerWidth { + bannerWidth = len(semVerConstraints) + 2 + } + } r.emitBlock(strings.Repeat("=", bannerWidth)) out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) @@ -504,6 +514,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } + if r.conf.GithubOutput { + r.emitBlock(r.fi(indent, "::group::Progress Report")) + } + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { r.emit("\n") r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) @@ -550,6 +564,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) r.emitGoroutines(indent, otherGoroutines...) } + + if r.conf.GithubOutput { + r.emitBlock(r.fi(indent, "::endgroup::")) + } } func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { @@ -698,8 +716,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { } func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { - texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} - texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) + texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{} + texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...) if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) @@ -707,6 +725,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) + semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints) locations = append(locations, report.LeafNodeLocation) failureLocation := report.Failure.FailureNodeLocation @@ -720,6 +739,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + semVerConstraints = append([][]string{{}}, semVerConstraints...) 
highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex @@ -747,6 +767,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(labels[i]) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) } + if len(semVerConstraints[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", ")) + } out += "\n" out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) } @@ -770,6 +793,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) } + flattenedSemVerConstraints := report.SemVerConstraints() + if len(flattenedSemVerConstraints) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", ")) + } out += "\n" if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 562e0f62ba2..828f893fb8f 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -36,6 +36,9 @@ type JunitReportConfig struct { // Enable OmitSpecLabels to prevent labels from appearing in the spec name OmitSpecLabels bool + // Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name + OmitSpecSemVerConstraints bool + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool @@ -169,9 +172,11 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))}, {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"SemVerFilter", report.SuiteConfig.SemVerFilter}, {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, @@ -207,6 +212,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit owner = matches[1] } } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = strings.TrimSpace(name) test := JUnitTestCase{ diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index e990ad82e13..55e1d1f4f79 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error { name := report.SuiteDescription labels := report.SuiteLabels + semVerConstraints := report.SuiteSemVerConstraints if len(labels) > 0 { name = name + " [" + strings.Join(labels, 
", ") + "]" } + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) for _, spec := range report.SpecReports { name := fmt.Sprintf("[%s]", spec.LeafNodeType) @@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error { if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = tcEscape(name) fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/e2e/vendor/github.com/onsi/ginkgo/v2/types/around_node.go new file mode 100644 index 00000000000..a069e0623d2 --- /dev/null +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/types/around_node.go @@ -0,0 +1,56 @@ +package types + +import ( + "context" +) + +type AroundNodeAllowedFuncs interface { + ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func() +} +type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context)) + +func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator { + if f == nil { + panic("BuildAroundNode cannot be called with a nil function.") + } + var aroundNodeFunc func(context.Context, func(context.Context)) + switch x := any(f).(type) { + case func(context.Context, func(context.Context)): + aroundNodeFunc = x + case func(context.Context) context.Context: + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + ctx = x(ctx) + body(ctx) + } + case func(): + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + x() + body(ctx) + } + } + + return AroundNodeDecorator{ + Body: aroundNodeFunc, + CodeLocation: cl, + } +} + +type AroundNodeDecorator struct { + Body AroundNodeFunc + CodeLocation CodeLocation +} + +type AroundNodes []AroundNodeDecorator + +func (an AroundNodes) Clone() AroundNodes { + out := make(AroundNodes, len(an)) + copy(out, an) + return out +} + +func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes { + out := make(AroundNodes, len(an)+len(other)) + copy(out, an) + copy(out[len(an):], other) + return out +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/types/config.go b/e2e/vendor/github.com/onsi/ginkgo/v2/types/config.go index 2e827efe307..b99a9e15e96 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -24,6 +24,7 @@ type SuiteConfig struct { FocusFiles []string SkipFiles []string LabelFilter string + SemVerFilter string FailOnPending bool FailOnEmpty bool FailFast bool @@ -308,6 +309,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version", + Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. 
'2.1.0'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", @@ -443,6 +446,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re } } + if suiteConfig.SemVerFilter != "" { + _, err := ParseSemVerFilter(suiteConfig.SemVerFilter) + if err != nil { + errors = append(errors, err) + } + } + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { case "", "dup", "swap", "none": default: @@ -573,6 +583,9 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, +} + +var GoBuildOFlags = GinkgoFlags{ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", Usage: "output binary path (including name)."}, } @@ -673,7 +686,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( - GoBuildFlags, + GoBuildFlags.CopyAppend(GoBuildOFlags...), map[string]any{ "Go": &goFlagsConfig, }, @@ -763,6 +776,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoBuildOFlags...) bindings := map[string]any{ "C": cliConfig, diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/e2e/vendor/github.com/onsi/ginkgo/v2/types/errors.go index c2796b5490a..59313238cfc 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { } } +func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid SemVerConstraint", + Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg), + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + +func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty SemVerConstraint", + Message: "SemVerConstraint cannot be empty", + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + /* Table errors */ func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { return GinkgoError{ diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/e2e/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go new file mode 100644 index 00000000000..3fc2ed144bd --- /dev/null +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go @@ -0,0 +1,60 @@ +package types + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" +) + +type SemVerFilter func([]string) bool + +func MustParseSemVerFilter(input string) SemVerFilter { + filter, err := ParseSemVerFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) { + if filterVersion == "" { + return func(_ []string) bool { return true }, nil + } + + targetVersion, err := semver.NewVersion(filterVersion) + if err != nil { + return nil, 
fmt.Errorf("invalid filter version: %w", err) + } + + return func(constraints []string) bool { + // unconstrained specs always run + if len(constraints) == 0 { + return true + } + + for _, constraintStr := range constraints { + constraint, err := semver.NewConstraint(constraintStr) + if err != nil { + return false + } + + if !constraint.Check(targetVersion) { + return false + } + } + + return true + }, nil +} + +func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) { + if len(semVerConstraint) == 0 { + return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl) + } + _, err := semver.NewConstraint(semVerConstraint) + if err != nil { + return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl) + } + + return semVerConstraint, nil +} diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/types/types.go b/e2e/vendor/github.com/onsi/ginkgo/v2/types/types.go index ddcbec1ba8a..b8e864a5d26 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" "sort" "strings" "time" @@ -30,6 +31,9 @@ type Report struct { //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function SuiteLabels []string + //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function + SuiteSemVerConstraints []string + //SuiteSucceeded captures the success or failure status of the test run //If true, the test run is considered successful. //If false, the test run is considered unsuccessful @@ -129,13 +133,18 @@ type SpecReport struct { // all Describe/Context/When containers in this spec's hierarchy ContainerHierarchyLabels [][]string - // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be // one of the NodeTypesForSuiteLevelNodes node types) - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string // State captures whether the spec has passed, failed, etc. 
State SpecState @@ -198,48 +207,52 @@ type SpecReport struct { func (report SpecReport) MarshalJSON() ([]byte, error) { //All this to avoid emitting an empty Failure struct in the JSON out := struct { - ContainerHierarchyTexts []string - ContainerHierarchyLocations []CodeLocation - ContainerHierarchyLabels [][]string - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string - State SpecState - StartTime time.Time - EndTime time.Time - RunTime time.Duration - ParallelProcess int - Failure *Failure `json:",omitempty"` - NumAttempts int - MaxFlakeAttempts int - MaxMustPassRepeatedly int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` - ProgressReports []ProgressReport `json:",omitempty"` - AdditionalFailures []AdditionalFailure `json:",omitempty"` - SpecEvents SpecEvents `json:",omitempty"` + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + ContainerHierarchySemVerConstraints [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ - ContainerHierarchyTexts: report.ContainerHierarchyTexts, - ContainerHierarchyLocations: report.ContainerHierarchyLocations, - ContainerHierarchyLabels: report.ContainerHierarchyLabels, - LeafNodeType: report.LeafNodeType, - LeafNodeLocation: report.LeafNodeLocation, - LeafNodeLabels: report.LeafNodeLabels, - LeafNodeText: report.LeafNodeText, - State: report.State, - StartTime: report.StartTime, - EndTime: report.EndTime, - RunTime: report.RunTime, - ParallelProcess: report.ParallelProcess, - Failure: nil, - ReportEntries: nil, - NumAttempts: report.NumAttempts, - MaxFlakeAttempts: report.MaxFlakeAttempts, - MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, - CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, - CapturedStdOutErr: report.CapturedStdOutErr, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, } if !report.Failure.IsZero() { @@ -287,6 +300,9 @@ 
func (report SpecReport) FullText() string { if report.LeafNodeText != "" { texts = append(texts, report.LeafNodeText) } + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) return strings.Join(texts, " ") } @@ -312,6 +328,28 @@ func (report SpecReport) Labels() []string { return out } +// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints. +func (report SpecReport) SemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints { + for _, semVerConstraint := range semVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + } + for _, semVerConstraint := range report.LeafNodeSemVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + + return out +} + // MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) @@ -321,6 +359,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } +// MatchesSemVerFilter returns true if the spec's semantic version constraints are all satisfied by the passed in version +func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) { + filter, err := ParseSemVerFilter(version) + if err != nil { + return false, err + } + return filter(report.SemVerConstraints()), nil +} + // FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go b/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go index 158ac2fd890..49f4a94a15f 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.23.4" +const VERSION = "2.25.2" diff --git a/e2e/vendor/github.com/onsi/gomega/CHANGELOG.md b/e2e/vendor/github.com/onsi/gomega/CHANGELOG.md index de9c957cc66..b7d7309f3f2 100644 --- a/e2e/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/e2e/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,13 @@ +## 1.38.2 + +- roll back to go 1.23.0 [c404969] + +## 1.38.1 + +### Fixes + +Numerous minor fixes and dependency bumps + ## 1.38.0 ### Features diff --git a/e2e/vendor/github.com/onsi/gomega/gomega_dsl.go b/e2e/vendor/github.com/onsi/gomega/gomega_dsl.go index 1f03e1f228d..fdba34ee9dd 100644 --- a/e2e/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/e2e/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.38.0" +const GOMEGA_VERSION = "1.38.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It().
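A note on the semantic-version filtering the vendored Ginkgo gains above: SpecReport.SemVerConstraints() flattens and dedupes the constraints declared on the container hierarchy and on the leaf node (first occurrence wins), and MatchesSemVerFilter() checks all of them against a single version, with unconstrained specs always matching. A minimal sketch, assuming the patched vendor tree above is what resolves for github.com/onsi/ginkgo/v2/types (stock upstream Ginkgo releases may not expose these fields and methods):

```go
package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	// Constraints gathered from nested containers plus the leaf node;
	// SemVerConstraints() dedupes them, keeping first-seen order.
	report := types.SpecReport{
		ContainerHierarchySemVerConstraints: [][]string{{">= 2.0.0"}, {"< 3.0.0"}},
		LeafNodeSemVerConstraints:           []string{">= 2.0.0", ">= 2.1.0"},
	}
	fmt.Println(report.SemVerConstraints()) // [>= 2.0.0 < 3.0.0 >= 2.1.0]

	// Every constraint must admit the filter version for the spec to run.
	ok, err := report.MatchesSemVerFilter("2.1.0")
	fmt.Println(ok, err) // true <nil>

	ok, _ = report.MatchesSemVerFilter("1.9.0")
	fmt.Println(ok) // false: ">= 2.0.0" rejects 1.9.0
}
```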
diff --git a/e2e/vendor/github.com/onsi/gomega/internal/async_assertion.go b/e2e/vendor/github.com/onsi/gomega/internal/async_assertion.go index a3a646e4ad0..4121505b627 100644 --- a/e2e/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/e2e/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -452,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } else { var fgErr formattedGomegaError - if errors.As(actualErr, &fgErr) { + if errors.As(matcherErr, &fgErr) { message += fgErr.FormattedGomegaError() + "\n" } else { message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) diff --git a/e2e/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/e2e/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 532fc374491..ce74eee4c70 100644 --- a/e2e/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/e2e/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -2,6 +2,7 @@ package matchers import ( "bytes" + "errors" "fmt" "github.com/google/go-cmp/cmp" @@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr if err, ok := r.(error); ok { matchErr = err } else if errMsg, ok := r.(string); ok { - matchErr = fmt.Errorf(errMsg) + matchErr = errors.New(errMsg) } } }() diff --git a/e2e/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/e2e/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 95057c26cc7..c3da9bd48b2 100644 --- a/e2e/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/e2e/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/onsi/gomega/format" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) type MatchYAMLMatcher struct { diff --git a/e2e/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/e2e/vendor/go.opentelemetry.io/otel/.clomonitor.yml new file mode 100644 index 00000000000..128d61a2260 --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/.clomonitor.yml @@ -0,0 +1,3 @@ +exemptions: + - check: artifacthub_badge + reason: "Artifact Hub doesn't support Go packages" diff --git a/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml b/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml index 888e5da8028..5f69cc027c2 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/e2e/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -66,8 +66,6 @@ linters: desc: Do not use cross-module internal packages. - pkg: go.opentelemetry.io/otel/internal/internaltest desc: Do not use cross-module internal packages. - - pkg: go.opentelemetry.io/otel/internal/matchers - desc: Do not use cross-module internal packages. otlp-internal: files: - '!**/exporters/otlp/internal/**/*.go' @@ -190,6 +188,10 @@ linters: - legacy - std-error-handling rules: + - linters: + - revive + path: schema/v.*/types/.* + text: avoid meaningless package names # TODO: Having appropriate comments for exported objects helps development, # even for objects in internal packages. Appropriate comments for all # exported objects should be added and this exclusion removed. 
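The two one-line Gomega fixes above are easy to miss but meaningful: async_assertion.go was passing actualErr to errors.As where the matcher's own error was intended, and BeComparableToMatcher's recover() path fed a recovered panic string to fmt.Errorf, which misreads any % verbs in that string. A standalone standard-library illustration of the latter pitfall (not Gomega's code, just the mechanism):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// A recovered panic message may contain format verbs.
	msg := "unexpected token '%s' at position 3"

	// fmt.Errorf treats msg as a format string, mangling the output:
	//   unexpected token '%!s(MISSING)' at position 3
	fmt.Println(fmt.Errorf(msg))

	// errors.New keeps the message verbatim, which is what the
	// recover() path actually wants:
	//   unexpected token '%s' at position 3
	fmt.Println(errors.New(msg))
}
```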
diff --git a/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 648e4abab8c..4acc75701b7 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/e2e/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,61 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.37.0/0.59.0/0.13.0] 2025-06-25 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.33.0` package. + The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0.`(#6799) +- The `go.opentelemetry.io/otel/semconv/v1.34.0` package. + The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812) +- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825) +- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839) + +### Changed + +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836) +- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864) +- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898) + +### Fixed + +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. 
(#6710) +- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822) +- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914) + +### Removed + +- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770) + +## [0.12.2] 2025-05-22 + +### Fixed + +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804) + +## [0.12.1] 2025-05-21 + +### Fixes + +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800) + ## [1.36.0/0.58.0/0.12.0] 2025-05-20 ### Added @@ -3288,7 +3343,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 +[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 +[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 [1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 [1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 diff --git a/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 1902dac057a..f9ddc281fc7 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/e2e/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -109,10 +109,9 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * The qualified approvals need to be from [Approver]s/[Maintainer]s - affiliated with different companies. Two qualified approvals from - [Approver]s or [Maintainer]s affiliated with the same company counts as a - single qualified approval. + * At least one of the qualified approvals need to be from an + [Approver]/[Maintainer] affiliated with a different company than the author + of the PR. * PRs introducing changes that have already been discussed and consensus reached only need one qualified approval. The discussion and resolution needs to be linked to the PR. @@ -650,11 +649,11 @@ should be canceled. 
### Maintainers -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [Tyler Yahn](https://github.com/MrAlias), Splunk +- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) +- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70)) +- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2)) +- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) +- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) ### Emeritus diff --git a/e2e/vendor/go.opentelemetry.io/otel/Makefile b/e2e/vendor/go.opentelemetry.io/otel/Makefile index 62a56f4d341..4fa423ca02d 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/Makefile +++ b/e2e/vendor/go.opentelemetry.io/otel/Makefile @@ -293,7 +293,7 @@ semconv-generate: $(SEMCONVKIT) --param tag=$(TAG) \ go \ /home/weaver/target - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)" .PHONY: gorelease gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) diff --git a/e2e/vendor/go.opentelemetry.io/otel/README.md b/e2e/vendor/go.opentelemetry.io/otel/README.md index b6007881212..5fa1b75c60e 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/README.md +++ b/e2e/vendor/go.opentelemetry.io/otel/README.md @@ -7,6 +7,7 @@ [![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). diff --git a/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md b/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md index 7c1a9119dcc..1ddcdef0396 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/e2e/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -112,6 +112,29 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `<new tag>` on GitHub. The release body should include all the release notes from the Changelog for this release. +### Sign the Release Artifact + +To ensure we comply with CNCF best practices, we need to sign the release artifact. +The tarball attached to the GitHub release needs to be signed with your GPG key. + +Follow [these steps] to sign the release artifact and upload it to GitHub. +You can use [this script] to verify the contents of the tarball before signing it. + +Be sure to use the correct GPG key when signing the release artifact. + +```terminal +gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<version>.tar.gz +``` + +You can verify the signature with: + +```terminal +gpg --verify opentelemetry-go-<version>.tar.gz.asc opentelemetry-go-<version>.tar.gz +``` + +[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases +[this script]: https://github.com/MrAlias/attest-sh + ## Post-Release ### Contrib Repository diff --git a/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index 51fb76b30d0..935bd487631 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/e2e/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. -FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python -FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver +FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python +FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cf3c88e15cd..cefe4ab914a 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type ( diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 5ecd859a52d..0d8619715e6 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type containerIDProvider func() (string, error) diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 813f0562424..16a062ad8cb 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) const ( diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 2d0f65498a0..78190392385 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type hostIDProvider func() (string, error) diff --git
a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 8a48ab4fa32..01b4d27a038 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type osDescriptionProvider func() (string, error) diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 085fe68fd77..6712ce80d5c 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type ( diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 6872cbb4e7a..6966ed861e6 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "sync" "sync/atomic" "time" @@ -267,7 +268,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if bsp.o.ExportTimeout > 0 { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout")) defer cancel() } diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 8f4fc385082..1785a4bbb0a 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) diff --git a/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go b/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go index 1af257449ac..c0217af6b9a 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/e2e/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md new file mode 100644 index 00000000000..02b56115e3c --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md @@ -0,0 +1,4 @@ + +# Migration from v1.33.0 to v1.34.0 + +The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`. 
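Among the OpenTelemetry SDK changes above, the batch span processor now builds its export deadline with context.WithTimeoutCause instead of context.WithTimeout, so an exporter that inspects context.Cause sees "processor export timeout" rather than a bare deadline-exceeded error. A standalone sketch of that standard-library mechanism (Go 1.21+), independent of the SDK itself:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// Attach a descriptive cause to the timeout, as the batch span
	// processor now does for its export deadline.
	cause := errors.New("processor export timeout")
	ctx, cancel := context.WithTimeoutCause(context.Background(), 10*time.Millisecond, cause)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // processor export timeout
}
```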
diff --git a/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md new file mode 100644 index 00000000000..fab06c97526 --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.34.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0) diff --git a/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go new file mode 100644 index 00000000000..5b56662573a --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go @@ -0,0 +1,13851 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], and from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version (`os.version`) of + // the android operating system. More information can be found [here]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found [here]. +// +// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. 
+ // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found [here] + // . + // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [here]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. 
+ AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. +func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact] provides a standard way to identify and locate the + // packaged artifact. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact] provides a standard way to identify and locate the packaged +// artifact. 
+// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the value + // of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. 
It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. 
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+ AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+ // AWSKinesisStreamNameKey is the attribute Key conforming to the
+ // "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the
+ // `--stream-name` parameter of the Kinesis [describe-stream] operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-stream-name"
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+ AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (the
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the
+ // `/runtime/invocation/next` request, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+ // Note: This may be different from `cloud.resource_id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+ // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+ // function. Its contents are read by Lambda and used to trigger a function.
+ // This isn't available in the lambda execution context or the lambda runtime
+ // environment. This is going to be populated by the AWS SDK for each language
+ // when that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+ AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+ // Note: See the [log group ARN format documentation].
+ //
+ // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of the
+ // AWS log group(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the Secret stored in Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+ //
+ // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+ AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+ // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+ // semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+ // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+ // used to access the queue and perform actions on it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+ AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+ // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+ // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+ // of the AWS Step Functions Activity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+ AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+ // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+ // "aws.step_functions.state_machine.arn" semantic conventions. It represents
+ // the ARN of the AWS Step Functions State Machine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+ AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions. 
It represents the unique +// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
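+//
+// Example (an illustrative sketch, not generated from the registry; assumes a
+// trace.Span named span is in scope, with the value shortened from the
+// documented example):
+//
+//	span.SetAttributes(AWSDynamoDBGlobalSecondaryIndexUpdates(
+//		`{ "Create": { "IndexName": "string" } }`,
+//	))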
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
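+//
+// Example (an illustrative sketch, not generated from the registry; assumes a
+// trace.Span named span is in scope):
+//
+//	// Record the ScannedCount reported by a Query or Scan response.
+//	span.SetAttributes(AWSDynamoDBScannedCount(50))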
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. +// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. 
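+//
+// Example (an illustrative sketch, not generated from the registry; assumes a
+// trace.Span named span is in scope and the ID was already parsed out of the
+// task ARN):
+//
+//	span.SetAttributes(AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"))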
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+ return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+// function. Its contents are read by Lambda and used to trigger a function.
+// This isn't available in the lambda execution context or the lambda runtime
+// environment. This is going to be populated by the AWS SDK for each language
+// when that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to. 
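+//
+// Example (an illustrative sketch, not generated from the registry; assumes a
+// trace.Span named span is in scope):
+//
+//	// Multiple log groups can be recorded at once, e.g. for sidecar containers.
+//	span.SetAttributes(AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"))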
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the Secret stored in Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions. 
It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. +// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // ec2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: az +const ( + // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic + // conventions. It represents the [Azure Resource Provider Namespace] as + // recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzNamespaceKey = attribute.Key("az.namespace") + + // AzServiceRequestIDKey is the attribute Key conforming to the + // "az.service_request_id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzServiceRequestIDKey = attribute.Key("az.service_request_id") +) + +// AzNamespace returns an attribute KeyValue conforming to the "az.namespace" +// semantic conventions. It represents the [Azure Resource Provider Namespace] as +// recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzNamespace(val string) attribute.KeyValue { + return AzNamespaceKey.String(val) +} + +// AzServiceRequestID returns an attribute KeyValue conforming to the +// "az.service_request_id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. 
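+//
+// Example (an illustrative sketch, not generated from the registry; assumes a
+// trace.Span named span is in scope):
+//
+//	span.SetAttributes(AzServiceRequestID("00000000-0000-0000-0000-000000000000"))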
+func AzServiceRequestID(val string) attribute.KeyValue { + return AzServiceRequestIDKey.String(val) +} + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // bounded_staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // consistent_prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. 
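+//
+// A minimal usage sketch (illustrative; the span variable is assumed to come
+// from the OpenTelemetry trace API):
+//
+//	span.SetAttributes(BrowserMobile(true))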
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// Namespace: cassandra
+const (
+ // CassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "cassandra.consistency.level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from [CQL].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+ CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+ // CassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "cassandra.coordinator.dc" semantic conventions. It represents the data
+ // center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: us-west-2
+ CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+ // CassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+ // coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+ CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+ // CassandraPageSizeKey is the attribute Key conforming to the
+ // "cassandra.page.size" semantic conventions. It represents the fetch size used
+ // for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 5000
+ CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+ // CassandraQueryIdempotentKey is the attribute Key conforming to the
+ // "cassandra.query.idempotent" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+ // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+ // "cassandra.speculative_execution.count" semantic conventions. It represents
+ // the number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 2
+ CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+ return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
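+//
+// A minimal usage sketch (illustrative; the span variable is assumed to come
+// from the OpenTelemetry trace API), combining this helper with other
+// Cassandra attributes defined in this file:
+//
+//	span.SetAttributes(
+//		CassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
+//		CassandraCoordinatorDC("us-west-2"),
+//		CassandraConsistencyLevelQuorum,
+//	)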
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+ return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+ return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+ return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+ // all
+ // Stability: development
+ CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ // Stability: development
+ CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ // Stability: development
+ CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ // Stability: development
+ CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ // Stability: development
+ CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+ // two
+ // Stability: development
+ CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+ // three
+ // Stability: development
+ CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+ // local_one
+ // Stability: development
+ CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+ // any
+ // Stability: development
+ CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+ // serial
+ // Stability: development
+ CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ // Stability: development
+ CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Namespace: cicd
+const (
+ // CICDPipelineActionNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+ // action a pipeline run is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BUILD", "RUN", "SYNC"
+ CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+ // CICDPipelineNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.name" semantic conventions. It represents the human readable
+ // name of the pipeline within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Build and Test", "Lint", "Deploy Go Project",
+ // "deploy_to_environment"
+ CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+ // CICDPipelineResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.result" semantic conventions. 
It represents the result of a
+ // pipeline run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+ // CICDPipelineRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.id" semantic conventions. It represents the unique
+ // identifier of a pipeline run within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "120912"
+ CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+ // CICDPipelineRunStateKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.state" semantic conventions. It represents the state a
+ // pipeline run goes through during its lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pending", "executing", "finalizing"
+ CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+ // CICDPipelineRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+ // the pipeline run, providing the complete address in order to locate and
+ // identify the pipeline run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+ // CICDPipelineTaskNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.name" semantic conventions. It represents the human
+ // readable name of a task within a pipeline. Task here most closely aligns with
+ // a [computing process] in a pipeline. Other terms for tasks include commands,
+ // steps, and procedures.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+ //
+ // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+ CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+ // CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+ // identifier of a task run within a pipeline.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "12097"
+ CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+ // CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.result" semantic conventions. It represents the
+ // result of a task run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+ // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+ // [URL] of the pipeline task run, providing the complete address in order to
+ // locate and identify the pipeline task run.
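+ //
+ // A minimal usage sketch (illustrative only; the span variable is assumed
+ // to come from the OpenTelemetry trace API), using the
+ // CICDPipelineTaskRunURLFull helper defined later in this file with the
+ // example URL from this documentation:
+ //
+ //	span.SetAttributes(CICDPipelineTaskRunURLFull("https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"))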
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. +func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of
+// the pipeline run, providing the complete address in order to locate and
+// identify the pipeline run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineRunURLFullKey.String(val)
+}
+
+// CICDPipelineTaskName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.name" semantic conventions. It represents the human
+// readable name of a task within a pipeline. Task here most closely aligns with
+// a [computing process] in a pipeline. Other terms for tasks include commands,
+// steps, and procedures.
+//
+// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+func CICDPipelineTaskName(val string) attribute.KeyValue {
+ return CICDPipelineTaskNameKey.String(val)
+}
+
+// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+// identifier of a task run within a pipeline.
+func CICDPipelineTaskRunID(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunIDKey.String(val)
+}
+
+// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+// [URL] of the pipeline task run, providing the complete address in order to
+// locate and identify the pipeline task run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunURLFullKey.String(val)
+}
+
+// CICDSystemComponent returns an attribute KeyValue conforming to the
+// "cicd.system.component" semantic conventions. It represents the name of a
+// component of the CICD system.
+func CICDSystemComponent(val string) attribute.KeyValue {
+ return CICDSystemComponentKey.String(val)
+}
+
+// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id"
+// semantic conventions. It represents the unique identifier of a worker within a
+// CICD system.
+func CICDWorkerID(val string) attribute.KeyValue {
+ return CICDWorkerIDKey.String(val)
+}
+
+// CICDWorkerName returns an attribute KeyValue conforming to the
+// "cicd.worker.name" semantic conventions. It represents the name of a worker
+// within a CICD system.
+func CICDWorkerName(val string) attribute.KeyValue {
+ return CICDWorkerNameKey.String(val)
+}
+
+// CICDWorkerURLFull returns an attribute KeyValue conforming to the
+// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+// worker, providing the complete address in order to locate and identify the
+// worker.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDWorkerURLFull(val string) attribute.KeyValue {
+ return CICDWorkerURLFullKey.String(val)
+}
+
+// Enum values for cicd.pipeline.action.name
+var (
+ // The pipeline run is executing a build.
+ // Stability: development
+ CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD")
+ // The pipeline run is executing.
+ // Stability: development
+ CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN")
+ // The pipeline run is executing a sync.
+ // Stability: development
+ CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC")
+)
+
+// Enum values for cicd.pipeline.result
+var (
+ // The pipeline run finished successfully.
+ // Stability: development
+ CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
+ // The pipeline run did not finish successfully, e.g. due to a compile error or
+ // a failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the pipeline run.
+ // Stability: development
+ CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+ // The pipeline run failed due to an error in the CICD system, e.g. due to the
+ // worker being killed.
+ // Stability: development
+ CICDPipelineResultError = CICDPipelineResultKey.String("error")
+ // A timeout caused the pipeline run to be interrupted.
+ // Stability: development
+ CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+ // The pipeline run was cancelled, e.g. by a user manually cancelling the
+ // pipeline run.
+ // Stability: development
+ CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+ // The pipeline run was skipped, e.g. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+ // The run pending state spans from the event triggering the pipeline run until
+ // the execution of the run starts (e.g. time spent in a queue, provisioning
+ // agents, creating run resources).
+ //
+ // Stability: development
+ CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+ // The executing state spans the execution of any run tasks (e.g. build, test).
+ // Stability: development
+ CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+ // The finalizing state spans the period after the run has finished executing
+ // (e.g. cleanup of run resources).
+ // Stability: development
+ CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+ // The task run finished successfully.
+ // Stability: development
+ CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+ // The task run did not finish successfully, e.g. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+ // The task run failed due to an error in the CICD system, e.g. due to the worker
+ // being killed.
+ // Stability: development
+ CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+ // A timeout caused the task run to be interrupted.
+ // Stability: development
+ CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+ // The task run was cancelled, e.g. by a user manually cancelling the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+ // The task run was skipped, e.g. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+ // build
+ // Stability: development
+ CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+ // test
+ // Stability: development
+ CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+ // deploy
+ // Stability: development
+ CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
+
+// Enum values for cicd.worker.state
+var (
+ // The worker is not performing work for the CICD system. 
It is available to the
+ // CICD system to perform work on (online / idle).
+ // Stability: development
+ CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+ // The worker is performing work for the CICD system.
+ // Stability: development
+ CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+ // The worker is not available to the CICD system (disconnected / down).
+ // Stability: development
+ CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
+// Namespace: client
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.address` SHOULD represent the client address behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port" semantic
+ // conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.port` SHOULD represent the client port behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
+// Namespace: cloud
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+ // semantic conventions. It represents the cloud account ID the resource is
+ // assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "111111111111", "opentelemetry"
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the
+ // availability zone where the resource is running. Cloud regions often have
+ // multiple, isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-east-1c"
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic + // conventions. It represents the geographical region within a cloud provider. + // When associated with a resource, this attribute specifies the region where + // the resource operates. When calling services or APIs deployed on a cloud, + // this attribute identifies the region where the called destination is + // deployed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "us-east-1" + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions], [AWS regions], [Azure regions], + // [Google Cloud regions], or [Tencent Cloud regions]. + // + // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm + // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ + // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ + // [Google Cloud regions]: https://cloud.google.com/about/locations + // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" + // semantic conventions. It represents the cloud provider-specific native + // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a + // [fully qualified resource ID] on Azure, a [full resource name] on GCP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", + // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", + // "/subscriptions//resourceGroups/ + // /providers/Microsoft.Web/sites//functions/" + // Note: On some cloud providers, it may not be possible to determine the full + // ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute + // and they apply: + // + // - **AWS Lambda:** The function [ARN]. + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix] + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases. + // - **GCP:** The [URI of the resource] + // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, + // *not* the function app, having the form + // + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` + // . + // This means that a span attribute MUST be used, as an Azure function app + // can host multiple functions that would usually share + // a TracerProvider. 
+ //
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
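+//
+// A minimal usage sketch (illustrative; the span variable is assumed to come
+// from the OpenTelemetry trace API), using the example Lambda ARN from the
+// documentation above:
+//
+//	span.SetAttributes(CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"))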
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: 
development
+ CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ // Stability: development
+ CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Compute on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+ // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ // Stability: development
+ CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ // Stability: development
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ // Stability: development
+ CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ // Stability: development
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ // Stability: development
+ CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+ // Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+ // Tencent Cloud
+ // Stability: development
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// Namespace: cloudevents
+const (
+ // CloudEventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the [event_id],
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+ //
+ // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+ CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudEventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the [source],
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+ // "my-service"
+ //
+ // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+ CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents specification] which the event uses. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ //
+ // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+ CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudEventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the [subject]
+ // of the event in the context of the event producer (identified by source).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: mynewfile.jpg
+ //
+ // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+ CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudEventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type], which contains a value describing the type of event related to
+ // the originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+ //
+ // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+ CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+// which uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+ return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source],
+// which identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+ return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+ return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type], which contains a value describing the type of event related to
+// the originating occurrence.
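+//
+// A minimal usage sketch (illustrative; the span variable is assumed to come
+// from the OpenTelemetry trace API), using an example type from this
+// documentation:
+//
+//	span.SetAttributes(CloudEventsEventType("com.github.pull_request.opened"))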
+// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
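+//
+// A minimal usage sketch (illustrative only; resource-level attributes such
+// as this one are typically grouped into an attribute set rather than set on
+// individual spans):
+//
+//	set := attribute.NewSet(
+//		CloudFoundryOrgName("my-org-name"),
+//		CloudFoundrySpaceName("my-space-name"),
+//	)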
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. 
+ // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. 
This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full command
+ // run by the container as a single string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
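+ //
+ // For example, a volume served by the Ceph RBD CSI driver might be described
+ // as follows (illustrative values; the volume handle format is
+ // driver-specific):
+ //
+ //	attrs := []attribute.KeyValue{
+ //		ContainerCSIPluginNameKey.String("rbd.csi.ceph.com"),
+ //		ContainerCSIVolumeIDKey.String("my-volume-handle"),
+ //	}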
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+ // K8s defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`
+ // .
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container runtime
+ // managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container runtime
+// managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. +func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // user + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // system + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // iowait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. 
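+ //
+ // For example, a query against a known table might be annotated as follows
+ // (illustrative values, taken from the examples above):
+ //
+ //	attrs := []attribute.KeyValue{
+ //		DBCollectionNameKey.String("public.users"),
+ //		DBOperationNameKey.String("SELECT"),
+ //	}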
+ DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. 
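+ //
+ // For example, a query might be paired with its low-cardinality summary as
+ // follows (illustrative values, mirroring the examples above):
+ //
+ //	attrs := []attribute.KeyValue{
+ //		DBQueryTextKey.String("SELECT * FROM wuser_table where username = ?"),
+ //		DBQuerySummaryKey.String("SELECT wuser_table"),
+ //	}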
+ // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. + // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. 
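+//
+// For example, given a span recording a failed PostgreSQL query, the response
+// status might be captured as follows (illustrative values; "08P01" is taken
+// from the examples above):
+//
+//	span.SetAttributes(
+//		DBResponseStatusCode("08P01"),
+//		DBOperationName("SELECT"),
+//	)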
+func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. + // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = 
DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // 
[SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
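+//
+// For example, a rollout might be described together with its target
+// environment (illustrative values, taken from the examples above):
+//
+//	attrs := []attribute.KeyValue{
+//		DeploymentName("deploy-frontend"),
+//		DeploymentEnvironmentName("staging"),
+//		DeploymentID("1208"),
+//	}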
+func DeploymentName(val string) attribute.KeyValue {
+ return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+ // failed
+ // Stability: development
+ DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+ // succeeded
+ // Stability: development
+ DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
+
+// Namespace: destination
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the destination
+ // address - domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found [here]
+ // .
+ //
+ // > [!WARNING]
+ // >
+ // > This attribute may contain sensitive (PII) information. Caution
+ // > should be taken when storing personal data or anything which can identify a
+ // > user. GDPR and data protection laws may apply,
+ // > ensure you do your own due diligence.
+ // >
+ // > Due to these reasons, this
+ // > identifier is not recommended for consumer applications and will likely
+ // > result in rejection from both Google Play and App Store.
+ // > However, it may be appropriate for specific enterprise scenarios, such as
+ // > kiosk devices or enterprise-managed devices, with appropriate compliance
+ // > clearance.
+ // > Any instrumentation providing this identifier MUST implement it as an
+ // > opt-in feature.
+ // >
+ // > See [`app.installation.id`] for a more
+ // > privacy-preserving alternative.
+ //
+ // [here]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apple", "Samsung"
+ // Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+ // hardcode the value `Apple`.
+ //
+ // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone3,4", "SM-G920F"
+ // Note: It's recommended this value represents a machine-readable version of
+ // the model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+ // semantic conventions. It represents the marketing name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+ // Note: It's recommended this value represents a human-readable version of the
+ // device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// Namespace: disk
+const (
+ // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+ // semantic conventions. It represents the disk IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
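+// For example, an instrumentation that must avoid recording raw PII might set
+// only the pseudonymous identifier (illustrative value, taken from the
+// examples above):
+//
+//	attrs := []attribute.KeyValue{
+//		EnduserPseudoIDKey.String("QdH5CAWJgqVT4rOr0qtumf"),
+//	}
+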
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ //   - Use a domain-specific attribute
+ //   - Set `error.type` to capture all errors, regardless of whether they are
+ //     defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? 
*
+	//
+	// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name of
+	// the source on which the triggering operation was performed. For example, in
+	// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+	// database name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "myBucketName", "myDbName"
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or S3 is
+	// the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "myFile.txt", "myTableName"
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the type of
+	// the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string containing
+	// the time when the data was accessed in the [ISO 8601] format expressed in
+	// [UTC].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 2020-01-23T13:47:06Z
+	//
+	// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+	// [UTC]: https://www.w3.org/TR/NOTE-datetime
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a string,
+	// that will be potentially reused for other invocations to the same
+	// function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+	// Note: - **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+	// the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+	// FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+	// semantic conventions. It represents the name of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: my-function
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+	// function.
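+	//
+	// A minimal illustrative sketch (not part of the generated definitions),
+	// assuming a `span` obtained from go.opentelemetry.io/otel/trace and using
+	// the constructors and enum values defined later in this file:
+	//
+	//	span.SetAttributes(
+	//		FaaSInvokedName("my-function"),
+	//		FaaSInvokedProviderAWS,
+	//		FaaSInvokedRegion("eu-central-1"),
+	//	)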
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud region of
+	// the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: eu-central-1
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+	// function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+	// semantic conventions. It represents the amount of memory available to the
+	// serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: It's recommended to set this attribute since e.g. too little memory can
+	// easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+	// the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+	// information (which must be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this runtime
+	// instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-function", "myazurefunctionapp/some-function-name"
+	// Note: This is the name of the function as configured/deployed on the FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function.name`]
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud providers/products:
+	//
+	// - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `cloud.resource_id` attribute).
+	//
+	//
+	// [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation time
+	// in the [ISO 8601] format expressed in [UTC].
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. 
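+//
+// A small illustrative sketch (not part of the generated definitions) of
+// producing such a value with the standard library:
+//
+//	FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339))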
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. 
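+	// For example (illustrative only), a delete trigger could be recorded via
+	// span.SetAttributes(FaaSDocumentOperationDelete).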
+	// Stability: development
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+	// Alibaba Cloud
+	// Stability: development
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	// Stability: development
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	// Stability: development
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	// Stability: development
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	// Stability: development
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+	// A response to some data source operation such as a database or filesystem
+	// read/write
+	// Stability: development
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	// Stability: development
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	// Stability: development
+	FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	// Stability: development
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	// Stability: development
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Namespace: feature_flag
+const (
+	// FeatureFlagContextIDKey is the attribute Key conforming to the
+	// "feature_flag.context.id" semantic conventions. It represents the unique
+	// identifier for the flag evaluation context. For example, the targeting key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+	FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+	// FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+	// semantic conventions. It represents the lookup key of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "logo-color"
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider.name" semantic conventions. It identifies the
+	// feature flag provider.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Flag Manager"
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+	// FeatureFlagResultReasonKey is the attribute Key conforming to the
+	// "feature_flag.result.reason" semantic conventions. It represents the reason
+	// code which shows how a feature flag value was determined.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "static", "targeting_match", "error", "default"
+	FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+	// FeatureFlagResultValueKey is the attribute Key conforming to the
+	// "feature_flag.result.value" semantic conventions. It represents the evaluated
+	// value of the feature flag.
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "#ff0000", true, 3
+	// Note: With some feature flag providers, feature flag results can be quite
+	// large or contain private or sensitive details.
+	// Because of this, `feature_flag.result.variant` is often the preferred
+	// attribute if it is available.
+	//
+	// It may be desirable to redact or otherwise limit the size and scope of
+	// `feature_flag.result.value` if possible.
+	// Because the evaluated flag value is unstructured and may be any type, it is
+	// left to the instrumentation author to determine how best to achieve this.
+	FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+	// FeatureFlagResultVariantKey is the attribute Key conforming to the
+	// "feature_flag.result.variant" semantic conventions. It represents a semantic
+	// identifier for an evaluated flag value.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "red", "true", "on"
+	// Note: A semantic identifier, commonly referred to as a variant, provides a
+	// means for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+	// FeatureFlagSetIDKey is the attribute Key conforming to the
+	// "feature_flag.set.id" semantic conventions. It represents the identifier of
+	// the [flag set] to which the feature flag belongs.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "proj-1", "ab98sgs", "service1/dev"
+	//
+	// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+	FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+	// FeatureFlagVersionKey is the attribute Key conforming to the
+	// "feature_flag.version" semantic conventions. It represents the version of the
+	// ruleset used during the evaluation. This may be any stable value which
+	// uniquely identifies the ruleset.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "1", "01ABCDEF"
+	FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+	return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It identifies the
+// feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
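+//
+// For example (an illustrative sketch, not part of the generated
+// definitions), a flag evaluation could be annotated with, assuming a `span`
+// obtained from go.opentelemetry.io/otel/trace:
+//
+//	span.SetAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagResultVariant("red"),
+//	)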
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+	return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+	return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+	return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+	// The resolved value is static (no dynamic evaluation).
+	// Stability: development
+	FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+	// The resolved value fell back to a pre-configured value (no dynamic evaluation
+	// occurred or dynamic evaluation yielded no result).
+	// Stability: development
+	FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+	// The resolved value was the result of a dynamic evaluation, such as a rule or
+	// specific user-targeting.
+	// Stability: development
+	FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+	// The resolved value was the result of pseudorandom assignment.
+	// Stability: development
+	FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+	// The resolved value was retrieved from cache.
+	// Stability: development
+	FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+	// The resolved value was the result of the flag being disabled in the
+	// management system.
+	// Stability: development
+	FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+	// The reason for the resolved value could not be determined.
+	// Stability: development
+	FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+	// The resolved value is non-authoritative or possibly out of date.
+	// Stability: development
+	FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+	// The resolved value was the result of an error.
+	// Stability: development
+	FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
+
+// Namespace: file
+const (
+	// FileAccessedKey is the attribute Key conforming to the "file.accessed"
+	// semantic conventions. It represents the time when the file was last accessed,
+	// in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: This attribute might not be supported by some file systems — NFS,
+	// FAT32, in embedded OS, etc.
+	FileAccessedKey = attribute.Key("file.accessed")
+
+	// FileAttributesKey is the attribute Key conforming to the "file.attributes"
+	// semantic conventions. It represents the array of file attributes.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "readonly", "hidden"
+	// Note: Attribute names depend on the OS or file system. Here’s a
+	// non-exhaustive list of values expected for this attribute: `archive`,
+	// `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+	// `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+	// `write`.
+	FileAttributesKey = attribute.Key("file.attributes")
+
+	// FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+	// conventions. It represents the time when the file attributes or metadata was
+	// last changed, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: `file.changed` captures the time when any of the file's properties or
+	// attributes (including the content) are changed, while `file.modified`
+	// captures the timestamp when the file content is modified.
+	FileChangedKey = attribute.Key("file.changed")
+
+	// FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+	// conventions. It represents the time when the file was created, in ISO 8601
+	// format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: This attribute might not be supported by some file systems — NFS,
+	// FAT32, in embedded OS, etc.
+	FileCreatedKey = attribute.Key("file.created")
+
+	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
+	// semantic conventions. It represents the directory where the file is located.
+	// It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/home/user", "C:\Program Files\MyApp"
+	FileDirectoryKey = attribute.Key("file.directory")
+
+	// FileExtensionKey is the attribute Key conforming to the "file.extension"
+	// semantic conventions. It represents the file extension, excluding the leading
+	// dot.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "png", "gz"
+	// Note: When the file name has multiple extensions (example.tar.gz), only the
+	// last one should be captured ("gz", not "tar.gz").
+	FileExtensionKey = attribute.Key("file.extension")
+
+	// FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+	// semantic conventions. It represents the name of the fork. A fork is
+	// additional data associated with a filesystem object.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Zone.Identifier"
+	// Note: On Linux, a resource fork is used to store additional data with a
+	// filesystem object. A file always has at least one fork for the data portion,
+	// and additional forks may exist.
+	// On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+	// data stream for a file is just called $DATA. Zone.Identifier is commonly used
+	// by Windows to track contents downloaded from the Internet. An ADS is
+	// typically of the form: C:\path\to\filename.extension:some_fork_name, and
+	// some_fork_name is the value that should populate `fork_name`.
+	// `filename.extension` should populate `file.name`, and `extension` should
+	// populate `file.extension`. The full path, `file.path`, will include the fork
+	// name.
+	FileForkNameKey = attribute.Key("file.fork_name")
+
+	// FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+	// semantic conventions.
It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. +func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. +func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. 
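+//
+// A small illustrative sketch (not part of the generated definitions),
+// deriving the value from the standard library, assuming a `span` obtained
+// from go.opentelemetry.io/otel/trace:
+//
+//	if info, err := os.Stat("/home/alice/example.png"); err == nil {
+//		span.SetAttributes(FileModified(info.ModTime().UTC().Format(time.RFC3339)))
+//	}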
+func FileModified(val string) attribute.KeyValue {
+	return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+	return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+	return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+	return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+	return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+	return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+	return FileSymbolicLinkTargetPathKey.String(val)
+}
+
+// Namespace: gcp
+const (
+	// GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+	// "gcp.apphub.application.container" semantic conventions. It represents the
+	// container within GCP where the AppHub application is defined.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "projects/my-container-project"
+	GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+	// GCPAppHubApplicationIDKey is the attribute Key conforming to the
+	// "gcp.apphub.application.id" semantic conventions. It represents the name of
+	// the application as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-application"
+	GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+	// GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+	// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+	// zone or region where the application is defined.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-central1"
+	GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+	// GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+	// criticality of a service, which indicates its importance to the business.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub type enum]
+	//
+	// [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+	GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+	// GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.service.environment_type" semantic conventions. It represents the
+	// environment of a service, which is the stage of a software lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub environment type]
+	//
+	// [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+	GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+	// GCPAppHubServiceIDKey is the attribute Key conforming to the
+	// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+	// service as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-service"
+	GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+	// GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+	// the criticality of a workload, which indicates its importance to the business.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub type enum]
+	//
+	// [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+	GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+	// GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.environment_type" semantic conventions. It represents
+	// the environment of a workload, which is the stage of a software lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: [See AppHub environment type]
+	//
+	// [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+	GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+	// GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+	// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+	// workload as configured in AppHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-workload"
+	GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+	// GCPClientServiceKey is the attribute Key conforming to the
+	// "gcp.client.service" semantic conventions. It identifies the
+	// Google Cloud service for which the official client library is intended.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+	// Note: Intended to be a stable identifier for Google Cloud client libraries
+	// that is uniform across implementation languages. The value should be derived
+	// from the canonical service domain for the service; for example,
+	// 'foo.googleapis.com' should result in a value of 'foo'.
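+	//
+	// For example (an illustrative sketch, not part of the generated
+	// definitions), a client library for 'spanner.googleapis.com' could record
+	// GCPClientService("spanner") on its telemetry.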
+ GCPClientServiceKey = attribute.Key("gcp.client.service") + + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of + // the Cloud Run [execution] being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`] environment variable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "job-name-xxxx", "sample-job-mdw84" + // + // [execution]: https://cloud.google.com/run/docs/managing/job-executions + // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index + // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] + // environment variable. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1 + // + // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") + + // GCPGCEInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname + // of a GCE instance. This is the full value of the default or [custom hostname] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-host1234.example.com", + // "sample-vm.us-west1-b.c.my-project.internal" + // + // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm + GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGCEInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance name + // of a GCE instance. This is the value provided by `host.name`, the visible + // name of the instance in the Cloud Console UI, and the prefix for the default + // hostname of the instance as defined by the [default internal DNS name]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-1", "my-vm-name" + // + // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names + GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the +// "gcp.apphub.application.container" semantic conventions. It represents the +// container within GCP where the AppHub application is defined. +func GCPAppHubApplicationContainer(val string) attribute.KeyValue { + return GCPAppHubApplicationContainerKey.String(val) +} + +// GCPAppHubApplicationID returns an attribute KeyValue conforming to the +// "gcp.apphub.application.id" semantic conventions. It represents the name of +// the application as configured in AppHub. +func GCPAppHubApplicationID(val string) attribute.KeyValue { + return GCPAppHubApplicationIDKey.String(val) +} + +// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the +// "gcp.apphub.application.location" semantic conventions. It represents the GCP +// zone or region where the application is defined. 
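+//
+// For example (illustrative only): GCPAppHubApplicationLocation("us-central1")
+// produces the attribute gcp.apphub.application.location="us-central1".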
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+	return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+	return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+	return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It identifies the
+// Google Cloud service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+	return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+	return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+	return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+	return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+	return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+	// Mission critical service.
+	// Stability: development
+	GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+	// High impact.
+	// Stability: development
+	GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+	// Medium impact.
+ // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Math Tutor", "Fiction Writer"
+	GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+	// GenAIConversationIDKey is the attribute Key conforming to the
+	// "gen_ai.conversation.id" semantic conventions. It represents the unique
+	// identifier for a conversation (session, thread), used to store and correlate
+	// messages within this conversation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+	GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+	// GenAIDataSourceIDKey is the attribute Key conforming to the
+	// "gen_ai.data_source.id" semantic conventions. It represents the data source
+	// identifier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "H7STPQYOND"
+	// Note: Data sources are used by AI agents and RAG applications to store
+	// grounding data. A data source may be an external database, object store,
+	// document collection, website, or any other storage system used by the GenAI
+	// agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+	// used by the GenAI system rather than a name specific to the external storage,
+	// such as a database or object store. Semantic conventions referencing
+	// `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+	// `db.*`, to further identify and describe the data source.
+	GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+	// GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
+	// "gen_ai.openai.request.service_tier" semantic conventions. It represents the
+	// service tier requested. May be a specific tier, default, or auto.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "auto", "default"
+	GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
+
+	// GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
+	// "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+	// service tier used for the response.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "scale", "default"
+	GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
+
+	// GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
+	// the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+	// represents a fingerprint to track any eventual change in the Generative AI
+	// environment.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "fp_44709d6fcb"
+	GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
+
+	// GenAIOperationNameKey is the attribute Key conforming to the
+	// "gen_ai.operation.name" semantic conventions. It represents the name of the
+	// operation being performed.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: If one of the predefined values applies, but a specific system uses a
+	// different name, it's RECOMMENDED to document it in the semantic conventions
+	// for the specific GenAI system and use the system-specific name in the
+	// instrumentation. If a different name is not documented, instrumentation
+	// libraries SHOULD use the applicable predefined value.
+	GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+	// GenAIOutputTypeKey is the attribute Key conforming to the
+	// "gen_ai.output.type" semantic conventions. It represents the
+	// content type requested by the client.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute SHOULD be used when the client requests output of a
+	// specific type. The model may return zero or more outputs of this type.
+	// This attribute specifies the output modality and not the actual output
+	// format. For example, if an image is requested, the actual output could be a
+	// URL pointing to an image file.
+	// Additional output format details may be recorded in the future in the
+	// `gen_ai.output.{type}.*` attributes.
+	GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+	// GenAIRequestChoiceCountKey is the attribute Key conforming to the
+	// "gen_ai.request.choice.count" semantic conventions. It represents the target
+	// number of candidate completions to return.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 3
+	GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+	// GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+	// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+	// encoding formats requested in an embeddings operation, if specified.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ["base64"], ["float", "binary"]
+	// Note: In some GenAI systems the encoding formats are called embedding types.
+	// Also, some GenAI systems only accept a single format per request.
+	GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+	// GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+	// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+	// frequency penalty setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.1
+	GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+	// GenAIRequestMaxTokensKey is the attribute Key conforming to the
+	// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+	// number of tokens the model generates for a request.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 100
+	GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+	// GenAIRequestModelKey is the attribute Key conforming to the
+	// "gen_ai.request.model" semantic conventions. It represents the name of the
+	// GenAI model a request is being made to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: gpt-4
+	GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+	// GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+	// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+	// presence penalty setting for the GenAI request.
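+	//
+	// For example (illustrative only), a request with a presence penalty of 0.1
+	// could be annotated via GenAIRequestPresencePenaltyKey.Float64(0.1).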
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the request seed;
+ // requests with the same seed value are more likely to return the same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ["stop"], ["stop", "length"]
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
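+
+ // Usage sketch (an editorial illustration, not generated from the
+ // conventions): the response-scoped keys above are typically recorded once
+ // the completion arrives, e.g.
+ //
+ //	span.SetAttributes(
+ //		GenAIResponseIDKey.String("chatcmpl-123"),
+ //		GenAIResponseFinishReasonsKey.StringSlice([]string{"stop"}),
+ //	)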
+
+ // GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as identified
+ // by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: openai
+ // Note: The `gen_ai.system` describes a family of GenAI models with a specific
+ // model identified by the `gen_ai.request.model` and `gen_ai.response.model`
+ // attributes.
+ //
+ // The actual GenAI product may differ from the one identified by the client.
+ // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI
+ // client libraries. In such cases, the `gen_ai.system` is set to `openai` based
+ // on the instrumentation's best knowledge, instead of the actual system. The
+ // `server.address` attribute may help identify the actual system in use for
+ // `openai`.
+ //
+ // For a custom model, a custom friendly name SHOULD be used.
+ // If none of these options apply, the `gen_ai.system` SHOULD be set to
+ // `_OTHER`.
+ GenAISystemKey = attribute.Key("gen_ai.system")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions.
It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "gen_ai.openai.response.service_tier" semantic conventions. It represents the +// service tier used for the response. +func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { + return GenAIOpenAIResponseServiceTierKey.String(val) +} + +// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming +// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It +// represents a fingerprint to track any eventual change in the Generative AI +// environment. +func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return GenAIOpenAIResponseSystemFingerprintKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. 
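+//
+// Usage sketch (an editorial illustration, not generated from the
+// conventions), assuming a [go.opentelemetry.io/otel/trace.Span] named span:
+//
+//	span.SetAttributes(GenAIRequestEncodingFormats("base64"))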
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the request seed;
+// requests with the same seed value are more likely to return the same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
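+//
+// Usage sketch (an editorial illustration, not generated from the
+// conventions), assuming a trace.Span named span and a completion ID taken
+// from the provider response:
+//
+//	span.SetAttributes(GenAIResponseID("chatcmpl-123"))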
+func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") +) + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.system +var ( + // OpenAI + // Stability: development + GenAISystemOpenAI = GenAISystemKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") + // Vertex AI + // Stability: development + GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") + // Gemini + // Stability: development + GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") + // Deprecated: Use 'gcp.vertex_ai' instead. + GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") + // Deprecated: Use 'gcp.gemini' instead. 
+ GenAISystemGemini = GenAISystemKey.String("gemini")
+ // Anthropic
+ // Stability: development
+ GenAISystemAnthropic = GenAISystemKey.String("anthropic")
+ // Cohere
+ // Stability: development
+ GenAISystemCohere = GenAISystemKey.String("cohere")
+ // Azure AI Inference
+ // Stability: development
+ GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference")
+ // Azure OpenAI
+ // Stability: development
+ GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai")
+ // IBM Watsonx AI
+ // Stability: development
+ GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai")
+ // AWS Bedrock
+ // Stability: development
+ GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock")
+ // Perplexity
+ // Stability: development
+ GenAISystemPerplexity = GenAISystemKey.String("perplexity")
+ // xAI
+ // Stability: development
+ GenAISystemXai = GenAISystemKey.String("xai")
+ // DeepSeek
+ // Stability: development
+ GenAISystemDeepseek = GenAISystemKey.String("deepseek")
+ // Groq
+ // Stability: development
+ GenAISystemGroq = GenAISystemKey.String("groq")
+ // Mistral AI
+ // Stability: development
+ GenAISystemMistralAI = GenAISystemKey.String("mistral_ai")
+)
+
+// Enum values for gen_ai.token.type
+var (
+ // Input tokens (prompt, input, etc.)
+ // Stability: development
+ GenAITokenTypeInput = GenAITokenTypeKey.String("input")
+ // Deprecated: Replaced by `output`.
+ GenAITokenTypeCompletion = GenAITokenTypeKey.String("output")
+ // Output tokens (completion, response, etc.)
+ // Stability: development
+ GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
+)
+
+// Namespace: geo
+const (
+ // GeoContinentCodeKey is the attribute Key conforming to the
+ // "geo.continent.code" semantic conventions. It represents the two-letter code
+ // representing the continent’s name.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ GeoContinentCodeKey = attribute.Key("geo.continent.code")
+
+ // GeoCountryISOCodeKey is the attribute Key conforming to the
+ // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+ // Country Code ([ISO 3166-1 alpha2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA"
+ //
+ // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+ GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code")
+
+ // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name"
+ // semantic conventions. It represents the locality name: the name of a city,
+ // town, village, or similar populated place.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Montreal", "Berlin"
+ GeoLocalityNameKey = attribute.Key("geo.locality.name")
+
+ // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat"
+ // semantic conventions. It represents the latitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 45.505918
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLatKey = attribute.Key("geo.location.lat")
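+
+ // Usage sketch (an editorial illustration, not generated from the
+ // conventions): latitude and longitude are reported together as a WGS84
+ // coordinate pair, e.g.
+ //
+ //	attrs := []attribute.KeyValue{
+ //		GeoLocationLatKey.Float64(45.505918),
+ //		GeoLocationLonKey.Float64(-73.61483),
+ //	}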
+
+ // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon"
+ // semantic conventions. It represents the longitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -73.61483
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLonKey = attribute.Key("geo.location.lon")
+
+ // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code"
+ // semantic conventions. It represents the postal code associated with the
+ // location. Values appropriate for this field may also be known as a postcode
+ // or ZIP code and will vary widely from country to country.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "94040"
+ GeoPostalCodeKey = attribute.Key("geo.postal_code")
+
+ // GeoRegionISOCodeKey is the attribute Key conforming to the
+ // "geo.region.iso_code" semantic conventions. It represents the region ISO code
+ // ([ISO 3166-2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA-QC"
+ //
+ // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+ GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code")
+)
+
+// GeoCountryISOCode returns an attribute KeyValue conforming to the
+// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+// Country Code ([ISO 3166-1 alpha2]).
+//
+// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+func GeoCountryISOCode(val string) attribute.KeyValue {
+ return GeoCountryISOCodeKey.String(val)
+}
+
+// GeoLocalityName returns an attribute KeyValue conforming to the
+// "geo.locality.name" semantic conventions. It represents the locality name:
+// the name of a city, town, village, or similar populated place.
+func GeoLocalityName(val string) attribute.KeyValue {
+ return GeoLocalityNameKey.String(val)
+}
+
+// GeoLocationLat returns an attribute KeyValue conforming to the
+// "geo.location.lat" semantic conventions. It represents the latitude of the geo
+// location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLat(val float64) attribute.KeyValue {
+ return GeoLocationLatKey.Float64(val)
+}
+
+// GeoLocationLon returns an attribute KeyValue conforming to the
+// "geo.location.lon" semantic conventions. It represents the longitude of the
+// geo location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLon(val float64) attribute.KeyValue {
+ return GeoLocationLonKey.Float64(val)
+}
+
+// GeoPostalCode returns an attribute KeyValue conforming to the
+// "geo.postal_code" semantic conventions. It represents the postal code
+// associated with the location. Values appropriate for this field may also be
+// known as a postcode or ZIP code and will vary widely from country to country.
+func GeoPostalCode(val string) attribute.KeyValue {
+ return GeoPostalCodeKey.String(val)
+}
+
+// GeoRegionISOCode returns an attribute KeyValue conforming to the
+// "geo.region.iso_code" semantic conventions. It represents the region ISO code
+// ([ISO 3166-2]).
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
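+//
+// Usage sketch (an editorial illustration, not generated from the
+// conventions), assuming a trace.Span named span:
+//
+//	span.SetAttributes(
+//		GraphQLOperationName("findBookById"),
+//		GraphQLOperationTypeQuery,
+//	)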
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
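+//
+// Usage sketch (an editorial illustration, not generated from the
+// conventions; the values reuse the examples documented above):
+//
+//	attrs := []attribute.KeyValue{
+//		HostCPUFamily("6"),
+//		HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"),
+//	}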
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. 
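+//
+// Usage sketch (an editorial illustration, not generated from the
+// conventions; it assumes this package's SchemaURL constant and the
+// [go.opentelemetry.io/otel/sdk/resource] package): host attributes are
+// typically attached to a resource rather than a span, e.g.
+//
+//	res := resource.NewWithAttributes(SchemaURL, HostType("n1-standard-1"))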
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Enum values for host.arch
+var (
+ // AMD64
+ // Stability: development
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ // Stability: development
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ // Stability: development
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ // Stability: development
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ // Stability: development
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ // Stability: development
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ // Stability: development
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ // Stability: development
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// Namespace: http
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of the
+ // HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "idle"
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of the
+ // request payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP request
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GET", "POST", "HEAD"
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // [RFC9110] and the PATCH method defined in [RFC5789].
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set the
+ // `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and the `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to be
+ // case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ //
+ // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods
+ // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GeT", "ACL", "foo"
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the ordinal
+ // number of the request resending attempt (for any reason, including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues,
+ // or any other reason).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size"
+ // semantic conventions. It represents the total size of the request in bytes.
+ // This should be the total number of bytes sent over the wire, including the
+ // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request
+ // body if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size of the
+ // response payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size of
+ // the response in bytes. This should be the total number of bytes sent over the
+ // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+ // headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status code].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 200
+ //
+ // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
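+
+ // Usage sketch (an editorial illustration, not generated from the
+ // conventions): an HTTP client span commonly carries the method and status
+ // code keys defined above, e.g.
+ //
+ //	span.SetAttributes(
+ //		HTTPRequestMethodKey.String("GET"),
+ //		HTTPResponseStatusCodeKey.Int(200),
+ //	)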
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic
+ // conventions. It represents the matched route, that is, the path template in
+ // the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/users/:userID?", "{controller}/{action}/{id?}"
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+ // framework, as the route attribute should have low cardinality and the URI
+ // path cannot substitute for it.
+ // SHOULD include the [application root] if there is one.
+ //
+ // [application root]: /docs/http/http-spans.md#http-server-definitions
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions.
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. 
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+)
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// Enum values for hw.state
+var (
+ // Ok
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
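+
+// hwComponentAttrs is an editorial usage sketch, not part of the generated
+// conventions: it shows how the hw.* keys and enum values above might be
+// combined to describe a single monitored component. The values reuse the
+// examples documented on each key.
+func hwComponentAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HwName("eth0"),
+  HwStateOk,
+  HwTypeNetwork,
+ }
+}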
+	//
+	// [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate
+	IOSAppStateKey = attribute.Key("ios.app.state")
+)
+
+// Enum values for ios.app.state
+var (
+	// The app has become `active`. Associated with UIKit notification
+	// `applicationDidBecomeActive`.
+	//
+	// Stability: development
+	IOSAppStateActive = IOSAppStateKey.String("active")
+	// The app is now `inactive`. Associated with UIKit notification
+	// `applicationWillResignActive`.
+	//
+	// Stability: development
+	IOSAppStateInactive = IOSAppStateKey.String("inactive")
+	// The app is now in the background. This value is associated with UIKit
+	// notification `applicationDidEnterBackground`.
+	//
+	// Stability: development
+	IOSAppStateBackground = IOSAppStateKey.String("background")
+	// The app is now in the foreground. This value is associated with UIKit
+	// notification `applicationWillEnterForeground`.
+	//
+	// Stability: development
+	IOSAppStateForeground = IOSAppStateKey.String("foreground")
+	// The app is about to terminate. Associated with UIKit notification
+	// `applicationWillTerminate`.
+	//
+	// Stability: development
+	IOSAppStateTerminate = IOSAppStateKey.String("terminate")
+)
+
+// Namespace: k8s
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name"
+	// semantic conventions. It represents the name of the cluster.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry-cluster"
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid"
+	// semantic conventions. It represents a pseudo-ID for the cluster, set to the
+	// UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: K8s doesn't have support for obtaining a cluster ID. If this is ever
+	// added, we will recommend collecting the `k8s.cluster.uid` through the
+	// official APIs. In the meantime, we are able to use the `uid` of the
+	// `kube-system` namespace as a proxy for cluster ID. Read on for the
+	// rationale.
+	//
+	// Every object created in a K8s cluster is assigned a distinct UID. The
+	// `kube-system` namespace is used by Kubernetes itself and will exist
+	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
+	// namespace is a reasonable proxy for the K8s ClusterID as it will only
+	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+	// UUIDs as standardized by
+	// [ISO/IEC 9834-8 and ITU-T X.667], which states:
+	//
+	// > If generated according to one of the mechanisms defined in Rec.
+	// > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+	// > different from all other UUIDs generated before 3603 A.D., or is
+	// > extremely likely to be different (depending on the mechanism chosen).
+	//
+	// Therefore, UIDs between clusters should be extremely unlikely to
+	// conflict.
+	//
+	// [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html
+	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// Container runtimes usually use a different, globally unique name
+	// (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "redis"
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the number
+	// of times the container was restarted. This attribute can be used to identify
+	// a particular container (running or stopped) within a container spec.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+	// K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to
+	// the "k8s.container.status.last_terminated_reason" semantic conventions. It
+	// represents the last terminated reason of the Container.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Evicted", "Error"
+	K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
+	// semantic conventions. It represents the name of the CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry"
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+	// K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid"
+	// semantic conventions. It represents the UID of the CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+	// K8SDaemonSetNameKey is the attribute Key conforming to the
+	// "k8s.daemonset.name" semantic conventions. It represents the name of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry"
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+	// K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid"
+	// semantic conventions. It represents the UID of the DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry"
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+	// K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
+	// conventions. It represents the name of the horizontal pod autoscaler.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. 
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "emptyDir", "persistentVolumeClaim"
+	K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod.
+// Container runtimes usually use a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+	return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+	return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
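+//
+// A hypothetical usage sketch, not part of the generated API surface; it
+// assumes a trace.Span named `span` and this package imported as `semconv`
+// (the UID is the documented example value):
+//
+//	span.SetAttributes(semconv.K8SDeploymentUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"))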
+func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. +func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. 
+func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. 
It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. 
This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a
+	// [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+	// identifiers (e.g. UUID) may be used as needed.
+	//
+	// [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+	return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+	// Logs from stdout stream
+	// Stability: development
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	// Stability: development
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Namespace: messaging
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the batching
+	// operation.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client library
+	// supports both batch and single-message API for the same operation,
+	// instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+	// and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client.id" semantic conventions. It represents a unique identifier
+	// for the client that consumes or produces a message.
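+	//
+	// A hypothetical usage sketch, not part of the generated definitions; it
+	// assumes a trace.Span named `span`, this package imported as `semconv`,
+	// and the documented example value:
+	//
+	//	span.SetAttributes(semconv.MessagingClientID("client-5"))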
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. 
An example would
+	// be a destination name involving a user name or product id. Although the
+	// destination name in this case is of high cardinality, the underlying template
+	// is of low cardinality and can be effectively used for grouping and
+	// aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might not
+	// exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+	// the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+	// represents the UTC epoch seconds at which the message has been accepted and
+	// stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+	// MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+	// the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+	// represents the ack deadline in seconds set for the modify ack deadline
+	// request.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+	// MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+	// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+	// ack id for a given message.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ack_id
+	MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+	// MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+	// It represents the delivery attempt for a given message.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+	// MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+	// the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+	// represents the ordering key for a given message. If the attribute is not
+	// present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ordering_key
+	MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the message
+	// keys in Kafka, which are used for grouping alike messages to ensure they're
+	// processed on the same partition. They differ from `messaging.message.id` in
+	// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
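+	//
+	// A hypothetical usage sketch, not part of the generated definitions; it
+	// assumes a trace.Span named `span`, this package imported as `semconv`,
+	// and the documented example value:
+	//
+	//	span.SetAttributes(semconv.MessagingKafkaMessageKey("myKey"))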
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myKey
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+	// boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+	// MessagingKafkaOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+	// record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the size of
+	// the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: This can refer to either the compressed or uncompressed body size. If
+	// both sizes are known, the uncompressed
+	// body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents the
+	// conversation ID identifying the conversation to which the message belongs,
+	// represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: MyConversationId
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents the
+	// size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: This can refer to either the compressed or uncompressed size. If both
+	// sizes are known, the uncompressed
+	// size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used by
+	// the messaging system as an identifier for the message, represented as a
+	// string.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 452a7c7c7c7048c2f887f61572b18fc2
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingOperationNameKey is the attribute Key conforming to the
+	// "messaging.operation.name" semantic conventions. It represents the
+	// system-specific name of the messaging operation.
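+	//
+	// A hypothetical usage sketch, not part of the generated definitions; it
+	// assumes a trace.Span named `span`, this package imported as `semconv`,
+	// and one of the documented example values:
+	//
+	//	span.SetAttributes(semconv.MessagingOperationName("ack"))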
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "ack", "nack", "send"
+	MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+	// MessagingOperationTypeKey is the attribute Key conforming to the
+	// "messaging.operation.type" semantic conventions. It represents a string
+	// identifying the type of the messaging operation.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+	// MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+	// the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+	// represents the rabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myKey
+	MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+	// MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+	// "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+	// the rabbitMQ message delivery tag.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+	// MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+	// "messaging.rocketmq.consumption_model" semantic conventions. It represents
+	// the model of message consumption. This only applies to consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+	// represents the delay time level for a delay message, which determines the
+	// message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+	// to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+	// It represents the timestamp in milliseconds that the delay message is
+	// expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents the
+	// message group, which is essential for FIFO messages. Messages that belong to
+	// the same message group are always processed one by one within the same
+	// consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myMessageGroup
+	MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents the
It represents the + // key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "keyA", "keyB" + MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketMQMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: tagA + MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents the + // type of message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketMQNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myNamespace + MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to + // the "messaging.servicebus.disposition_status" semantic conventions. It + // represents the describes the [settlement type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock + MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to + // the "messaging.servicebus.message.delivery_count" semantic conventions. It + // represents the number of deliveries that have been attempted for this + // message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.servicebus.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + + // MessagingSystemKey is the attribute Key conforming to the "messaging.system" + // semantic conventions. It represents the messaging system as identified by the + // client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate with + // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the + // instrumentation's best knowledge. 
+ MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. 
It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+	return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+	return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+	return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka, which are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+	return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+	return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
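+//
+// A hypothetical usage sketch, not part of the generated API; it assumes a
+// trace.Span named `span`, this package imported as `semconv`, and the
+// documented example value:
+//
+//	span.SetAttributes(semconv.MessagingMessageConversationID("MyConversationId"))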
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the rabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+	return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the
+// message group, which is essential for FIFO messages. Messages that belong to
+// the same message group are always processed one by one within the same
+// consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message ID.
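+//
+// A hypothetical usage sketch, not part of the generated API; it assumes a
+// trace.Span named `span`, this package imported as `semconv`, and the
+// documented example values. Note the variadic parameter, which becomes a
+// string-slice attribute:
+//
+//	span.SetAttributes(semconv.MessagingRocketMQMessageKeys("keyA", "keyB"))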
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+	return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+	return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+	// A message is created. "Create" spans always refer to a single message and are
+	// used to provide a unique creation context for messages in batch sending
+	// scenarios.
+	//
+	// Stability: development
+	MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+	// One or more messages are provided for sending to an intermediary. If a single
+	// message is sent, the context of the "Send" span can be used as the creation
+	// context and no "Create" span needs to be created.
+	//
+	// Stability: development
+	MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+	// One or more messages are requested by a consumer. This operation refers to
+	// pull-based scenarios, where consumers explicitly call methods of messaging
+	// SDKs to receive messages.
+	//
+	// Stability: development
+	MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+	// One or more messages are processed by a consumer.
+	//
+	// Stability: development
+	MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+	// One or more messages are settled.
+	//
+	// Stability: development
+	MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+	// Deprecated: Replaced by `process`.
+	MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver")
+	// Deprecated: Replaced by `send`.
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: DE
+	NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMCCKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+	// country code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 310
+	NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMNCKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+	// network code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 001
+	NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of the
+	// mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: sprint
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionStateKey is the attribute Key conforming to the
+	// "network.connection.state" semantic conventions. It represents the state of
+	// the network connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "close_wait"
+	// Note: Connection states are defined as part of [rfc9293].
+	//
+	// [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+	NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more details
+	// regarding the connection.type. It may be the type of cell technology
+	// connection, but it could be used for describing details about a wifi
+	// connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: LTE
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the internet
+	// connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: wifi
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkInterfaceNameKey is the attribute Key conforming to the
+	// "network.interface.name" semantic conventions. It represents the network
+	// interface name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "lo", "eth0"
+	NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+	// NetworkIODirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network IO
+	// operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "transmit"
+	NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local address
+	// of the network connection - IP address or Unix domain socket name.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. + // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. 
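+//
+// A short sketch (illustrative only; it assumes a trace.Span named "span"
+// describing an HTTP/1.1 exchange over TCP):
+//
+//	span.SetAttributes(
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//		NetworkTransportTCP,
+//	)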
+func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B
+	// Stability: development
+	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	// Stability: development
+	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+	// EHRPD
+	// Stability: development
+	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	// Stability: development
+	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+	// GSM
+	// Stability: development
+	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	// Stability: development
+	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	// Stability: development
+	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	// Stability: development
+	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	// Stability: development
+	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	// Stability: development
+	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+	// wifi
+	// Stability: development
+	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+	// wired
+	// Stability: development
+	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+	// cell
+	// Stability: development
+	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+	// unavailable
+	// Stability: development
+	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+	// unknown
+	// Stability: development
+	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+	// transmit
+	// Stability: development
+	NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+	// receive
+	// Stability: development
+	NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+	// TCP
+	// Stability: stable
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	// Stability: stable
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe.
+	// Stability: stable
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	// Stability: stable
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+	// QUIC
+	// Stability: stable
+	NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+	// IPv4
+	// Stability: stable
+	NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	// Stability: stable
+	NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
+
+// Namespace: oci
+const (
+	// OCIManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of the
+	// OCI image manifest. For container images specifically, this is the digest by
+	// which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+	// Note: Follows [OCI Image Manifest Specification], and specifically the
+	// [Digest property].
+	// An example can be found in [Example Image Manifest].
+	//
+	// [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+	// [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+	// [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+	OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+	return OCIManifestDigestKey.String(val)
+}
+
+// Namespace: opentracing
+const (
+	// OpenTracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the parent-child
+	// Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+	// The parent Span depends on the child Span in some capacity
+	// Stability: development
+	OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+	// The parent Span doesn't depend in any way on the result of the child Span
+	// Stability: development
+	OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
+
+// Namespace: os
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+	// conventions. It represents the unique identifier for a particular build or
+	// compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human readable (not intended to be
+	// parsed) OS version information, e.g. as reported by the `ver` or
+	// `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iOS", "Android", "Ubuntu"
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version" semantic
+	// conventions. It represents the version string of the operating system as
+	// defined in [Version Attributes].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.2.1", "18.04.1"
+	//
+	// [Version Attributes]: /docs/resource/README.md#version-attributes
+	OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, e.g. as reported by the `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// Enum values for os.type
+var (
+	// Microsoft Windows
+	// Stability: development
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	// Stability: development
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	// Stability: development
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	// Stability: development
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	// Stability: development
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	// Stability: development
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	// Stability: development
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	// Stability: development
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	// Stability: development
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	// Stability: development
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	// Stability: development
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// Namespace: otel
+const (
+	// OTelComponentNameKey is the attribute Key conforming to the
+	// "otel.component.name" semantic conventions. It represents a name uniquely
+	// identifying the instance of the OpenTelemetry component within its containing
+	// SDK instance.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otlp_grpc_span_exporter/0", "custom-name"
+	// Note: Implementations SHOULD ensure a low cardinality for this attribute,
+	// even across application or SDK restarts.
+	// E.g. implementations MUST NOT use UUIDs as values for this attribute.
+	//
+	// Implementations MAY achieve these goals by following a
+	// `<otel.component.type>/<instance-counter>` pattern, e.g.
+	// `batching_span_processor/0`.
+	// Hereby `otel.component.type` refers to the corresponding attribute value of
+	// the component.
+	//
+	// The value of `instance-counter` MAY be automatically assigned by the
+	// component and uniqueness within the enclosing SDK instance MUST be
+	// guaranteed.
+	// For example, `<instance-counter>` MAY be implemented by using a monotonically
+	// increasing counter (starting with `0`), which is incremented every time an
+	// instance of the given component type is started.
+	//
+	// With this implementation, for example the first Batching Span Processor would
+	// have `batching_span_processor/0`
+	// as `otel.component.name`, the second one `batching_span_processor/1` and so
+	// on.
+	// These values will therefore be reused in the case of an application restart.
+	OTelComponentNameKey = attribute.Key("otel.component.name")
+
+	// OTelComponentTypeKey is the attribute Key conforming to the
+	// "otel.component.type" semantic conventions. It represents a name identifying
+	// the type of the OpenTelemetry component.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "batching_span_processor", "com.example.MySpanExporter"
+	// Note: If none of the standardized values apply, implementations SHOULD use
+	// the language-defined name of the type.
+	// E.g. for Java the fully qualified classname SHOULD be used in this case.
+	OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+	// OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+	// semantic conventions. It represents the name of the instrumentation scope - (
+	// `InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "io.opentelemetry.contrib.mongodb"
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of the
+	// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "1.0.0"
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+	// OTelSpanSamplingResultKey is the attribute Key conforming to the
+	// "otel.span.sampling_result" semantic conventions. It represents the result
+	// value of the sampler for this span.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+	// OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+	// semantic conventions. It represents the name of the code, either "OK" or
+	// "ERROR". MUST NOT be set if the status code is UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples:
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the description
+	// of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "resource not found"
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
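+//
+// For instance (an illustrative sketch; the component name follows the
+// `<otel.component.type>/<instance-counter>` pattern described above and is
+// hypothetical):
+//
+//	attrs := []attribute.KeyValue{
+//		OTelComponentName("batching_span_processor/0"),
+//		OTelComponentTypeBatchingSpanProcessor,
+//	}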
+func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = 
OTelComponentTypeKey.String("otlp_http_metric_exporter")
+	// OTLP metric exporter over HTTP with JSON serialization
+	//
+	// Stability: development
+	OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+	// The span is not sampled and not recording
+	// Stability: development
+	OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+	// The span is not sampled, but recording
+	// Stability: development
+	OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+	// The span is sampled and recording
+	// Stability: development
+	OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+	// The operation has been validated by an Application developer or Operator to
+	// have completed successfully.
+	// Stability: stable
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error.
+	// Stability: stable
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// Namespace: peer
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+	// conventions. It represents the [`service.name`] of the remote service. SHOULD
+	// be equal to the actual `service.name` resource attribute of the remote
+	// service if any.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: AuthTokenCache
+	//
+	// [`service.name`]: /docs/resource/README.md#service
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// Namespace: process
+const (
+	// ProcessArgsCountKey is the attribute Key conforming to the
+	// "process.args_count" semantic conventions. It represents the length of the
+	// process.command_args array.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 4
+	// Note: This field can be useful for querying or performing bucket analysis on
+	// how many arguments were provided to start a process. More arguments may be an
+	// indication of suspicious activity.
+	ProcessArgsCountKey = attribute.Key("process.args_count")
+
+	// ProcessCommandKey is the attribute Key conforming to the "process.command"
+	// semantic conventions. It represents the command used to launch the process
+	// (i.e. the command name). On Linux based systems, can be set to the zeroth
+	// string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+	// extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "cmd/otelcol"
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received by
+	// the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+	// would be the full argv vector passed to `main`. SHOULD NOT be collected by
+	// default unless there is sanitization that excludes sensitive data.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "cmd/otecol", "--config=config.yaml"
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full command
+	// used to launch the process as a single string representing the full command.
+	// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+	// you have to assemble it just for monitoring; use `process.command_args`
+	// instead. SHOULD NOT be collected by default unless there is sanitization that
+	// excludes sensitive data.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "C:\cmd\otecol --config="my directory\config.yaml""
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It specifies whether the
+	// context switches for this data point were voluntary or involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and time
+	// the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2023-11-21T09:25:34.853Z"
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+	// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+	// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+	ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+	// ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+	// "process.executable.build_id.go" semantic conventions. It represents the Go
+	// build ID as retrieved by `go tool buildid <go executable>`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+	ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+	// ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+	// "process.executable.build_id.htlhash" semantic conventions. It represents the
+	// profiling specific build ID for executables. See the OTel specification for
+	// Profiles for more information.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+	ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name of the
+	// process executable. On Linux based systems, this SHOULD be set to the base
+	// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+	// the base name of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "otelcol"
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full path
+	// to the process executable. On Linux based systems, can be set to the target
+	// of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/usr/bin/cmd/otelcol"
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+	// semantic conventions. It represents the exit code of the process.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+	// semantic conventions. It represents the date and time the process exited, in
+	// ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2023-11-21T09:26:12.315Z"
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID of the
+	// process's group leader. This is also the process group ID (PGID) of the
+	// process.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether the
+	// process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessLinuxCgroupKey is the attribute Key conforming to the
+	// "process.linux.cgroup" semantic conventions. It represents the control group
+	// associated with the process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+	// "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+	// Note: Control groups (cgroups) are a kernel feature used to organize and
+	// manage process resources. This attribute provides the path(s) to the
+	// cgroup(s) associated with the process, which should match the contents of the
+	// [/proc/[PID]/cgroup] file.
+ // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 14.0.2
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+	// ProcessSavedUserIDKey is the attribute Key conforming to the
+	// "process.saved_user.id" semantic conventions. It represents the saved user ID
+	// (SUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1002
+	ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+	// ProcessSavedUserNameKey is the attribute Key conforming to the
+	// "process.saved_user.name" semantic conventions. It represents the username of
+	// the saved user.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "operator"
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID of
+	// the process's session leader. This is also the session ID (SID) of the
+	// process.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessTitleKey is the attribute Key conforming to the "process.title"
+	// semantic conventions. It represents the process title (proctitle).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "cat /etc/hostname", "xfce4-session", "bash"
+	// Note: In many Unix-like systems, the process title (proctitle) is the string
+	// that represents the name or command line of a running process, displayed by
+	// system monitoring tools like ps, top, and htop.
+	ProcessTitleKey = attribute.Key("process.title")
+
+	// ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+	// semantic conventions. It represents the effective user ID (EUID) of the
+	// process.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+	// semantic conventions. It represents the username of the effective user of the
+	// process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "root"
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+	// conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily unique
+	// across all processes on the host but it is unique within the process
+	// namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+
+	// ProcessWorkingDirectoryKey is the attribute Key conforming to the
+	// "process.working_directory" semantic conventions. It represents the working
+	// directory of the process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/root"
+	ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+	return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+	return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+	return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
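+//
+// For example (illustrative only; the build ID is the documentation example
+// above, not a real executable's ID):
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessExecutableBuildIDHtlhash("600DCAFE4A110000F2BF38C493F5FB92"),
+//	}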
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
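+//
+// A small usage sketch, assuming an active trace.Span named `span`:
+//
+//	span.SetAttributes(ProcessRealUserID(1000))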
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. 
It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It describes the
+ // interpreter or compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.
+ //
+ // Stability: development
+ //
+ // [C]: https://wikipedia.org/wiki/C_(programming_language)
+ // [C++]: https://wikipedia.org/wiki/C%2B%2B
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+ // [Perl]
+ //
+ // Stability: development
+ //
+ // [Perl]: https://wikipedia.org/wiki/Perl
+ ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+ // [PHP]
+ //
+ // Stability: development
+ //
+ // [PHP]: https://wikipedia.org/wiki/PHP
+ ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+ // [Python]
+ //
+ // Stability: development
+ //
+ // [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+ ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+ // [Ruby]
+ //
+ // Stability: development
+ //
+ // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+ ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+ // [V8JS]
+ //
+ // Stability: development
+ //
+ // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+ ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+ // [Erlang]
+ //
+ // Stability: development
+ //
+ // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+ ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+ // [Go]
+ //
+ // Stability: development
+ //
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+ // [Rust]
+ //
+ // Stability: development
+ //
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes] of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [error codes]: https://connectrpc.com/docs/protocol/#error-codes
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the
+ // [numeric status code] of the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+ // property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -32700, 100
+ RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int, string,
+ // `null` or missing (for notifications), value is expected to be cast to string
+ // for simplicity. Use empty string in case of `null` value. Omit entirely if
+ // this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called,
+ // which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function.name` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+ // conventions. It represents the full (logical) name of the service being
+ // called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myservice.EchoService
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+ // conventions. It represents a string identifying the remoting system. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+ return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+ return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since protocol allows id to be int, string, `null` or
+// missing (for notifications), value is expected to be cast to string for
+// simplicity. Use empty string in case of `null` value. Omit entirely if this is
+// a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+ return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+ return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`, one for sent messages and one for received messages.
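+//
+// A hedged sketch of the two-counter scheme described above; the `span`,
+// `sentCount`, and `recvCount` names are illustrative assumptions, and
+// trace.WithAttributes comes from go.opentelemetry.io/otel/trace:
+//
+//	sentCount++ // the first sent message gets ID 1
+//	span.AddEvent("message", trace.WithAttributes(
+//		RPCMessageTypeSent, RPCMessageID(sentCount),
+//	))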
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// Enum values for rpc.connect_rpc.error_code
+var (
+ // cancelled
+ // Stability: development
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ // Stability: development
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ // Stability: development
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ // Stability: development
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ // Stability: development
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ // Stability: development
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ // Stability: development
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ // Stability: development
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ // Stability: development
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ // Stability: development
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ // Stability: development
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ // Stability: development
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ // Stability: development
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ // Stability: development
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ // Stability: development
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ // Stability: development
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+// Enum values for rpc.grpc.status_code
+var (
+ // OK
+ // Stability: development
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ // Stability: development
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ // Stability: development
+ RPCGRPCStatusCodeUnknown = 
RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
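+//
+// A minimal usage sketch, assuming an active trace.Span named `span` (the
+// license string reuses the documented example):
+//
+//	span.SetAttributes(SecurityRuleLicense("Apache 2.0"))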
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. 
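+//
+// Typically recorded alongside ServerAddress; a small sketch assuming an
+// active trace.Span named `span`:
+//
+//	span.SetAttributes(ServerAddress("example.com"), ServerPort(443))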
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
+// Namespace: service
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID of
+ // the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "627cc493-f310-47de-96bd-71410b7dec09"
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time (e.g.
+ // instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 [RFC
+ // 4122] UUID, but are free to use an inherent unique ID as
+ // the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be used as
+ // source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the purposes of
+ // identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`] file, the underlying
+ // data, such as pod name and namespace should be treated as confidential, being
+ // the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we do
+ // not recommend using one identifier
+ // for all processes participating in the application. Instead, it's recommended
+ // that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it can't
+ // unambiguously determine the
+ // service instance that is generating that telemetry. For instance, creating a
+ // UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container within
+ // that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers, as
+ // they know the target address and
+ // port.
+ //
+ // [RFC
+ // 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fallback to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. 
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. 
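+//
+// A sketch of recording a session rotation, assuming an active trace.Span
+// named `span`; the new session ID is made up for illustration:
+//
+//	span.SetAttributes(
+//		SessionID("99887766-5544-3322-1100-ffeeddccbbaa"), // illustrative
+//		SessionPreviousID("00112233-4455-6677-8899-aabbccddeeff"),
+//	)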
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the SignalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
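+//
+// Usually paired with SourceAddress; a small sketch assuming an active
+// trace.Span named `span`:
+//
+//	span.SetAttributes(SourceAddress("10.1.2.80"), SourcePort(3389))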
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It is deprecated; use
+ // `cpu.logical_number` instead.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "(identifier)"
+ SystemDeviceKey = attribute.Key("system.device")
+
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the filesystem
+ // mode.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "rw, ro"
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/mnt/data"
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the filesystem
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "used"
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the filesystem
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ext4"
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free", "cached"
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "in"
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory paging
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free"
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory paging
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "minor"
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State Codes].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "running"
+ //
+ // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It is deprecated; use
+// `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+ // used
+ // Stability: development
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ // Stability: development
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // Deprecated: Removed, report shared memory usage with
+ // `metric.system.memory.shared` metric.
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. 
It represents the fully qualified human readable name
+	// of the [test case].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1",
+	// "ExampleTestCase1_test1"
+	//
+	// [test case]: https://wikipedia.org/wiki/Test_case
+	TestCaseNameKey = attribute.Key("test.case.name")
+
+	// TestCaseResultStatusKey is the attribute Key conforming to the
+	// "test.case.result.status" semantic conventions. It represents the status of
+	// the actual test case result from test execution.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pass", "fail"
+	TestCaseResultStatusKey = attribute.Key("test.case.result.status")
+
+	// TestSuiteNameKey is the attribute Key conforming to the "test.suite.name"
+	// semantic conventions. It represents the human readable name of a
+	// [test suite].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "TestSuite1"
+	//
+	// [test suite]: https://wikipedia.org/wiki/Test_suite
+	TestSuiteNameKey = attribute.Key("test.suite.name")
+
+	// TestSuiteRunStatusKey is the attribute Key conforming to the
+	// "test.suite.run.status" semantic conventions. It represents the status of the
+	// test suite run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "skipped", "aborted", "timed_out",
+	// "in_progress"
+	TestSuiteRunStatusKey = attribute.Key("test.suite.run.status")
+)
+
+// TestCaseName returns an attribute KeyValue conforming to the "test.case.name"
+// semantic conventions. It represents the fully qualified human readable name of
+// the [test case].
+//
+// [test case]: https://wikipedia.org/wiki/Test_case
+func TestCaseName(val string) attribute.KeyValue {
+	return TestCaseNameKey.String(val)
+}
+
+// TestSuiteName returns an attribute KeyValue conforming to the
+// "test.suite.name" semantic conventions. It represents the human readable name
+// of a [test suite].
+//
+// [test suite]: https://wikipedia.org/wiki/Test_suite
+func TestSuiteName(val string) attribute.KeyValue {
+	return TestSuiteNameKey.String(val)
+}
+
+// Enum values for test.case.result.status
+var (
+	// pass
+	// Stability: development
+	TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass")
+	// fail
+	// Stability: development
+	TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail")
+)
+
+// Enum values for test.suite.run.status
+var (
+	// success
+	// Stability: development
+	TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success")
+	// failure
+	// Stability: development
+	TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure")
+	// skipped
+	// Stability: development
+	TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped")
+	// aborted
+	// Stability: development
+	TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted")
+	// timed_out
+	// Stability: development
+	TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out")
+	// in_progress
+	// Stability: development
+	TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress")
+)
+
+// Namespace: thread
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed to OS
+	// thread ID).
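+	//
+	// An illustrative usage sketch (an assumption for clarity, not part of the
+	// generated conventions): given an OpenTelemetry trace.Span `span`, the
+	// logical thread identity could be recorded with the helpers below:
+	//
+	//	span.SetAttributes(ThreadID(42), ThreadName("worker-1"))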
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+	// conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: main
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// Namespace: tls
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+	// conventions. It represents the string indicating the [cipher] used during the
+	// current connection.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+	// "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+	// Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+	// of the [registered TLS Cipher Suites].
+	//
+	// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+	// [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+	// stand-alone certificate offered by the client. This is usually
+	// mutually-exclusive of `client.certificate_chain` since this value also exists
+	// in that list.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII..."
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the array
+	// of PEM-encoded certificates that make up the certificate chain offered by the
+	// client. This is usually mutually-exclusive of `client.certificate` since that
+	// value should be the first certificate in the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII...", "MI..."
+	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+	// TLSClientHashMd5Key is the attribute Key conforming to the
+	// "tls.client.hash.md5" semantic conventions. It represents the certificate
+	// fingerprint using the MD5 digest of DER-encoded version of certificate
+	// offered by the client. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+	// TLSClientHashSha1Key is the attribute Key conforming to the
+	// "tls.client.hash.sha1" semantic conventions.
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. 
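+	//
+	// An illustrative sketch (an assumption, not generated code): the offered
+	// cipher names could be collected from a crypto/tls ClientHelloInfo `hello`:
+	//
+	//	names := make([]string, 0, len(hello.CipherSuites))
+	//	for _, id := range hello.CipherSuites {
+	//		names = append(names, tls.CipherSuiteName(id))
+	//	}
+	//	attr := TLSClientSupportedCiphers(names...)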
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." 
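+	//
+	// An illustrative sketch (an assumption, not generated code): the PEM value
+	// could be derived from the first peer certificate of a completed crypto/tls
+	// handshake, given a tls.ConnectionState `state`:
+	//
+	//	leaf := state.PeerCertificates[0]
+	//	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: leaf.Raw})
+	//	attr := TLSServerCertificate(string(pemBytes))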
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the array
+	// of PEM-encoded certificates that make up the certificate chain offered by the
+	// server. This is usually mutually-exclusive of `server.certificate` since that
+	// value should be the first certificate in the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII...", "MI..."
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the certificate
+	// fingerprint using the MD5 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+	// fingerprint using the SHA1 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+	// fingerprint using the SHA256 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+	// semantic conventions. It represents the distinguished name of [subject] of
+	// the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+	//
+	// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+	// semantic conventions. It represents a hash that identifies servers based on
+	// how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "d4e5b18d6b55c71272893221c96ba240"
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/Time
+	// indicating when server certificate is no longer considered valid.
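+	//
+	// An illustrative sketch (an assumption, not generated code): the value
+	// could be taken from the certificate's NotAfter field, formatted like the
+	// example below, given an *x509.Certificate `leaf`:
+	//
+	//	attr := TLSServerNotAfter(leaf.NotAfter.UTC().Format("2006-01-02T15:04:05.000Z"))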
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. 
It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions.
It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. 
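+	//
+	// An illustrative sketch (an assumption, not generated code) of the
+	// credential redaction described in the note below, given a *url.URL `u`
+	// from net/url:
+	//
+	//	if u.User != nil {
+	//		u.User = url.UserPassword("REDACTED", "REDACTED")
+	//	}
+	//	attr := URLFull(u.String())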
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. 
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 443
+	URLPortKey = attribute.Key("url.port")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI query] component.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "q=OpenTelemetry"
+	// Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	//
+	//
+	// Query string values for the following keys SHOULD be redacted by default and
+	// replaced by the value `REDACTED`:
+	//
+	// - [`AWSAccessKeyId`]
+	// - [`Signature`]
+	// - [`sig`]
+	// - [`X-Goog-Signature`]
+	//
+	// This list is subject to change over time.
+	//
+	// When a query string value is redacted, the query string key SHOULD still be
+	// preserved, e.g.
+	// `q=OpenTelemetry&sig=REDACTED`.
+	//
+	// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+	// [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+	// [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+	// [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+	// [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLRegisteredDomainKey is the attribute Key conforming to the
+	// "url.registered_domain" semantic conventions. It represents the highest
+	// registered url domain, stripped of the subdomain.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "example.com", "foo.co.uk"
+	// Note: This value can be determined precisely with the [public suffix list].
+	// For example, the registered domain for `foo.example.com` is `example.com`.
+	// Trying to approximate this by simply taking the last two labels will not work
+	// well for TLDs such as `co.uk`.
+	//
+	// [public suffix list]: https://publicsuffix.org/
+	URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+	// conventions. It represents the [URI scheme] component identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "https", "ftp", "telnet"
+	//
+	// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+	// semantic conventions. It represents the subdomain portion of a fully
+	// qualified domain name, which includes all of the names except the host name
+	// under the registered_domain. In a partially qualified domain, or if the
+	// qualification level of the full name cannot be determined, subdomain contains
+	// all of the names below the registered domain.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "east", "sub2.sub1"
+	// Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+	// domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+	// subdomain field should contain `sub2.sub1`, with no trailing period.
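+	//
+	// An illustrative sketch (an assumption, not generated code) following the
+	// note above for `www.east.mydomain.co.uk`; precise decomposition requires
+	// the public suffix list:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		URLRegisteredDomain("mydomain.co.uk"),
+	//		URLSubdomain("east"),
+	//		URLTopLevelDomain("co.uk"),
+	//	}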
+	URLSubdomainKey = attribute.Key("url.subdomain")
+
+	// URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+	// conventions. It represents the low-cardinality template of an
+	// [absolute path reference].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+	//
+	// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+	URLTemplateKey = attribute.Key("url.template")
+
+	// URLTopLevelDomainKey is the attribute Key conforming to the
+	// "url.top_level_domain" semantic conventions. It represents the effective top
+	// level domain (eTLD), also known as the domain suffix; it is the last part of
+	// the domain name. For example, the top level domain for example.com is `com`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "com", "co.uk"
+	// Note: This value can be determined precisely with the [public suffix list].
+	//
+	// [public suffix list]: https://publicsuffix.org/
+	URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+	return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+	return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+	return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+	return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+	return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name, which includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+	return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+	return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix; it is the last part of
+// the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+	return URLTopLevelDomainKey.String(val)
+}
+
+// Namespace: user
+const (
+	// UserEmailKey is the attribute Key conforming to the "user.email" semantic
+	// conventions. It represents the user email address.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "a.einstein@example.com"
+	UserEmailKey = attribute.Key("user.email")
+
+	// UserFullNameKey is the attribute Key conforming to the "user.full_name"
+	// semantic conventions. It represents the user's full name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Albert Einstein"
+	UserFullNameKey = attribute.Key("user.full_name")
+
+	// UserHashKey is the attribute Key conforming to the "user.hash" semantic
+	// conventions. It represents the unique user hash to correlate information for
+	// a user in anonymized form.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+	// Note: Useful if `user.id` or `user.name` contain confidential information and
+	// cannot be used.
+	UserHashKey = attribute.Key("user.hash")
+
+	// UserIDKey is the attribute Key conforming to the "user.id" semantic
+	// conventions. It represents the unique identifier of the user.
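+	//
+	// An illustrative sketch (an assumption, not generated code): when the raw
+	// identifier is confidential, an anonymized `user.hash` could be derived
+	// from it instead, per the `user.hash` note above:
+	//
+	//	sum := sha256.Sum256([]byte("S-1-5-21-202424912787-2692429404-2351956786-1000"))
+	//	attr := UserHash(hex.EncodeToString(sum[:]))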
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. +func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. 
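+	//
+	// An illustrative sketch (an assumption, not generated code): in an HTTP
+	// server the value can be read straight from the request, given an
+	// *http.Request `r` from net/http:
+	//
+	//	attr := UserAgentOriginal(r.UserAgent())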
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+	// grpc-java-okhttp/1.27.2"
+	//
+	// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+	// UserAgentOSNameKey is the attribute Key conforming to the
+	// "user_agent.os.name" semantic conventions. It represents the human readable
+	// operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iOS", "Android", "Ubuntu"
+	// Note: For mapping user agent strings to OS names, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+	// UserAgentOSVersionKey is the attribute Key conforming to the
+	// "user_agent.os.version" semantic conventions. It represents the version
+	// string of the operating system as defined in [Version Attributes].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.2.1", "18.04.1"
+	// Note: For mapping user agent strings to OS versions, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [Version Attributes]: /docs/resource/README.md#version-attributes
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+	// UserAgentSyntheticTypeKey is the attribute Key conforming to the
+	// "user_agent.synthetic.type" semantic conventions. It specifies the category
+	// of synthetic traffic, such as tests or bots.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute MAY be derived from the contents of the
+	// `user_agent.original` attribute. Components that populate the attribute are
+	// responsible for determining what they consider to be synthetic bot or test
+	// traffic. This attribute can either be set for self-identification purposes,
+	// or on telemetry detected to be generated as a result of a synthetic request.
+	// This attribute is useful for distinguishing between genuine client traffic
+	// and synthetic traffic generated by bots or tests.
+	UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+	// UserAgentVersionKey is the attribute Key conforming to the
+	// "user_agent.version" semantic conventions. It represents the version of the
+	// user-agent extracted from original. Usually refers to the browser's version.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.1.2", "1.0.0"
+	// Note: [Example] of extracting browser's version from original string. In the
+	// case of using a user-agent for non-browser products, such as microservices
+	// with multiple names/versions inside the `user_agent.original`, the most
+	// significant version SHOULD be selected. In such a scenario it should align
+	// with `user_agent.name`.
+	//
+	// [Example]: https://www.whatsmyua.info
+	UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions.
It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. 
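+	//
+	// An illustrative sketch (an assumption, not generated code): a line-change
+	// measurement could be attributed as below, using the raw enum string from
+	// the Examples since the generated enum variables are declared elsewhere in
+	// this package:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		VCSLineChangeTypeKey.String("added"),
+	//		VCSChangeID("123"),
+	//	}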
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "added", "removed"
+	VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+	// VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+	// semantic conventions. It represents the group owner within the version
+	// control system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-org", "myteam", "business-unit"
+	VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+	// VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+	// semantic conventions. It represents the name of the version control system
+	// provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "github", "gitlab", "gitea", "bitbucket"
+	VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+	// VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+	// semantic conventions. It represents the name of the [reference] such as
+	// **branch** or **tag** in the repository.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-feature-branch", "tag-1-test"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+	// VCSRefBaseRevisionKey is the attribute Key conforming to the
+	// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+	// object in Git, or a revision number in SVN.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+	// "main", "123", "HEAD"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits. The
+	// revision can be a full [hash value (see glossary)]
+	// of the recorded change to a ref within a repository pointing to a
+	// [commit] object. It does
+	// not necessarily have to be a hash; it can simply define a [revision
+	// number]
+	// which is an integer that is monotonically increasing. In cases where
+	// it is identical to the `ref.base.name`, it SHOULD still be included.
+	// It is up to the implementer to decide which value to set as the
+	// revision based on the VCS system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision
+	// number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+	VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+	// VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+	// semantic conventions. It represents the type of the [reference] in the
+	// repository.
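+	//
+	// An illustrative sketch (an assumption, not generated code): describing a
+	// change from a feature branch (head) against `main` (base), using the raw
+	// enum strings from the Examples:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		VCSRefBaseNameKey.String("main"),
+	//		VCSRefBaseTypeKey.String("branch"),
+	//		VCSRefHeadNameKey.String("my-feature-branch"),
+	//		VCSRefHeadTypeKey.String("branch"),
+	//	}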
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `head` refers to where you are right now; the current reference at a + // given time.The revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.head.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") + + // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") + + // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic + // conventions. It represents the type of the [reference] in the repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. 
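+//
+// For example (an illustrative sketch, given a trace.Span named span):
+//
+//	span.SetAttributes(VCSRefBaseName("main"))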
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. 
+ // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. + // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // Deprecated: Replaced by `gitea`. + VCSProviderNameGittea = VCSProviderNameKey.String("gittea") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. 
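+	// An illustrative sketch, assuming the SDK's resource package is
+	// imported as resource:
+	//
+	//	resource.NewWithAttributes(SchemaURL, WebEngineName("WildFly"))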
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" +// semantic conventions. It represents the name of the web engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the web +// engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} \ No newline at end of file diff --git a/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go new file mode 100644 index 00000000000..2c5c7ebd041 --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.34.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" diff --git a/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go new file mode 100644 index 00000000000..88a998f1e56 --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go new file mode 100644 index 00000000000..3c23d459254 --- /dev/null +++ b/e2e/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.34.0" diff --git a/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go b/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go index d90af8f673c..f3aa398138e 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/e2e/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) diff --git a/e2e/vendor/go.opentelemetry.io/otel/version.go b/e2e/vendor/go.opentelemetry.io/otel/version.go index ac3c0b15da2..7afe92b5981 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/version.go +++ b/e2e/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/e2e/vendor/go.opentelemetry.io/otel/versions.yaml b/e2e/vendor/go.opentelemetry.io/otel/versions.yaml index 79f82f3d058..9d4742a1764 100644 --- a/e2e/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/e2e/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,13 +3,12 @@ module-sets: stable-v1: - version: v1.36.0 + version: v1.37.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -23,14 +22,16 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.58.0 + version: v0.59.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.12.0 + version: v0.13.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/log/logtest - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog @@ -40,6 +41,4 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/log/logtest - - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/e2e/vendor/go.yaml.in/yaml/v3/LICENSE b/e2e/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 00000000000..2683e4bb1f2 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. 
+ +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/e2e/vendor/go.yaml.in/yaml/v3/NOTICE b/e2e/vendor/go.yaml.in/yaml/v3/NOTICE new file mode 100644 index 00000000000..866d74a7ad7 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/e2e/vendor/go.yaml.in/yaml/v3/README.md b/e2e/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 00000000000..15a85a6350a --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. 
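+
+A minimal round-trip sketch (an illustrative fragment; the complete program in
+the Example section below shows the imports and struct decoding):
+
+```go
+out, err := yaml.Marshal(map[string]int{"a": 1}) // out == []byte("a: 1\n")
+if err != nil {
+	log.Fatalf("error: %v", err)
+}
+var v map[string]int
+if err := yaml.Unmarshal(out, &v); err != nil {
+	log.Fatalf("error: %v", err)
+}
+```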
+ +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. 
+Please see the LICENSE file for details. diff --git a/e2e/vendor/go.yaml.in/yaml/v3/apic.go b/e2e/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 00000000000..05fd305da16 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. 
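+// For example (illustrative; os.Stdin stands in for any io.Reader), wiring a
+// parser to a stream instead of an in-memory []byte:
+//
+//	var p yaml_parser_t
+//	yaml_parser_initialize(&p)
+//	yaml_parser_set_input_reader(&p, os.Stdin)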
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. 
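+// For example (an illustrative sketch, assuming the style constants declared
+// in yamlh.go):
+//
+//	var evt yaml_event_t
+//	yaml_sequence_start_event_initialize(&evt, nil, nil, true, yaml_BLOCK_SEQUENCE_STYLE)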
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/e2e/vendor/go.yaml.in/yaml/v3/decode.go b/e2e/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 00000000000..02e2b17bfe0 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
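+//
+// For example (illustrative), the document "a: 1" arrives as the event stream
+//
+//	STREAM-START, DOCUMENT-START, MAPPING-START,
+//	SCALAR("a"), SCALAR("1"), MAPPING-END, DOCUMENT-END, STREAM-END
+//
+// which parse() folds into a DocumentNode wrapping a single MappingNode.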
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
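+		// (For example, yaml.Unmarshal([]byte{}, &v) reaches this case
+		// and returns with v unchanged; illustrative.)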
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
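+			// For example (illustrative):
+			//
+			//	a:
+			//	  b: 1
+			//	# foot comment: belongs to "a", not to the following "c"
+			//	c: 2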
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
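+//
+// For example (illustrative), decoding into a **T field makes the loop below
+// allocate both pointer levels before unmarshalling proceeds.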
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/emitterc.go b/e2e/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 00000000000..ab4e03ba726 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
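
The emitter's low-level writers here (flush, put, put_break, write, and write_all next) share one invariant: before emitting at most one UTF-8 rune or break sequence they ensure headroom, flushing once buffer_pos+5 reaches the buffer's end. A simplified standalone sketch of that pattern; illustrative only, as the real emitter also tracks line, column, and indention state:

```go
package main

import (
	"bytes"
	"fmt"
)

type miniEmitter struct {
	buf []byte // fixed-size staging buffer
	pos int    // next free position
	out bytes.Buffer
}

// flushIfNeeded mirrors flush(): drain the staging buffer once fewer than
// five free bytes remain, so any UTF-8 rune (at most 4 bytes) still fits.
func (e *miniEmitter) flushIfNeeded() {
	if e.pos+5 >= len(e.buf) {
		e.out.Write(e.buf[:e.pos])
		e.pos = 0
	}
}

// put mirrors put(): one byte into the buffer, flushing first if required.
func (e *miniEmitter) put(b byte) {
	e.flushIfNeeded()
	e.buf[e.pos] = b
	e.pos++
}

func main() {
	e := &miniEmitter{buf: make([]byte, 16)}
	for _, b := range []byte("hello, emitter buffers") {
		e.put(b)
	}
	e.out.Write(e.buf[:e.pos]) // final drain
	fmt.Println(e.out.String())
}
```
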
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
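
The indent arithmetic implemented next is driven by emitter.best_indent, which the public encoder exposes as (*Encoder).SetIndent. A hedged sketch of the observable effect; the output shown in the comment is the typical rendering, not a test quoted from this patch:

```go
package main

import (
	"bytes"
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	data := map[string][]int{"ports": {80, 443}}

	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2) // ends up in emitter.best_indent (the default is 4)
	if err := enc.Encode(data); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	// Typically:
	// ports:
	//   - 80
	//   - 443
	fmt.Print(buf.String())
}
```
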
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } + } + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: 
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
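
The flow-sequence writer below is the path taken by nodes carrying FlowStyle. A small sketch using the public node API; the output in the comment is the typical form:

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	seq := &yaml.Node{
		Kind:  yaml.SequenceNode,
		Style: yaml.FlowStyle, // routes emission through the '[' ... ']' writer
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "a"},
			{Kind: yaml.ScalarNode, Value: "b"},
			{Kind: yaml.ScalarNode, Value: "c"},
		},
	}
	out, err := yaml.Marshal(seq)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // typically: [a, b, c]
}
```
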
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
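
Flow mappings follow the same shape: simple keys take the compact `k: v` form, while non-simple keys fall back to an explicit '?' indicator. A sketch of the common case through the public node API, where Content alternates key and value nodes:

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	m := &yaml.Node{
		Kind:  yaml.MappingNode,
		Style: yaml.FlowStyle, // Content alternates key, value, key, value, ...
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "a"},
			{Kind: yaml.ScalarNode, Value: "1"},
			{Kind: yaml.ScalarNode, Value: "b"},
			{Kind: yaml.ScalarNode, Value: "2"},
		},
	}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // typically: {a: 1, b: 2}
}
```
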
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
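
The block-sequence writer below weaves head, line, and foot comments around each '- ' item, which is why comments attached to nodes survive re-encoding. A sketch against the public node API; the comment shows the typical rendering:

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	seq := &yaml.Node{
		Kind: yaml.SequenceNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "one", HeadComment: "first section"},
			{Kind: yaml.ScalarNode, Value: "two", LineComment: "inline note"},
		},
	}
	out, err := yaml.Marshal(seq)
	if err != nil {
		panic(err)
	}
	// Typically:
	// # first section
	// - one
	// - two # inline note
	fmt.Print(string(out))
}
```
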
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column we are in within the yaml output. Column 0 is the first character of the line.
+		// emitter.indention tells us if the last character was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So, `seq` means that we are in a mapping context, and we are either at the first char of the column or
+		// the last character was not an indentation character, and we consider '- ' part of the indentation
+		// for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		// scanner associates line comments with the value. Either way,
+		// save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
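
The value writer below contains the key_line_comment handling: a comment that arrived attached to a mapping key is either passed on to a scalar value or, when an indented block follows, written out immediately. A hedged sketch of the latter case (names are illustrative; the output shown is the typical rendering, not asserted by this patch):

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	doc := &yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "servers", LineComment: "primary set"},
			{Kind: yaml.MappingNode, Content: []*yaml.Node{
				{Kind: yaml.ScalarNode, Value: "host"},
				{Kind: yaml.ScalarNode, Value: "a.example"},
			}},
		},
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	// The key's comment is written before the indented block; typically:
	// servers: # primary set
	//     host: a.example
	fmt.Print(string(out))
}
```
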
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
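
yaml_emitter_emit_scalar below defers style choice to yaml_emitter_select_scalar_style, and from the public API a requested style arrives via Node.Style, subject to override (for example, multiline simple keys are forced to double quotes). A sketch requesting a literal block scalar; the comment shows typical output:

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	n := &yaml.Node{
		Kind:  yaml.ScalarNode,
		Style: yaml.LiteralStyle, // requested style; select_scalar_style may override it
		Value: "line one\nline two",
	}
	out, err := yaml.Marshal(n)
	if err != nil {
		panic(err)
	}
	// Typically:
	// |-
	//     line one
	//     line two
	fmt.Print(string(out))
}
```
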
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
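
Note the `length <= 128` bound in the function below: keys longer than 128 bytes, multiline scalars, and non-empty collections are not simple keys and are emitted in the explicit '?'/':' form instead. A sketch that trips the length bound; the output shape is the typical rendering:

```go
package main

import (
	"fmt"
	"strings"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	long := strings.Repeat("k", 200) // 200 bytes: over the 128-byte simple-key limit
	out, err := yaml.Marshal(map[string]string{long: "v"})
	if err != nil {
		panic(err)
	}
	// Typically rendered with the explicit key indicator:
	// ? kkkk...k
	// : v
	fmt.Print(string(out))
}
```
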
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. 
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
+	if len(emitter.line_comment) == 0 {
+		// The next 3 lines are needed to resolve an issue with leading newlines
+		// See https://github.com/go-yaml/yaml/issues/755
+		// When linebreak is set to true, put_break will be called and will add
+		// the needed newline.
+		if linebreak && !put_break(emitter) {
+			return false
+		}
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
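
The validator below enforces the shape of %TAG handles: non-empty, starting and ending with '!', alphanumeric in between, with a non-empty prefix. On the parsing side such directives expand handles into full tags, as in this hedged sketch against the public API (the document content and expected tag are illustrative):

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	doc := `%TAG !e! tag:example.com,2000:
---
!e!widget {name: spanner}
`
	var n yaml.Node // top-level decode yields a DocumentNode
	if err := yaml.Unmarshal([]byte(doc), &n); err != nil {
		panic(err)
	}
	// The !e! handle is expanded using the %TAG prefix; typically prints:
	// tag:example.com,2000:widget
	fmt.Println(n.Content[0].Tag)
}
```
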
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
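
The classification done by yaml_emitter_analyze_scalar above is what ultimately decides quoting: leading or trailing whitespace rules out plain style, while tabs and special characters additionally rule out single quotes. A sketch showing those tendencies through the public encoder; the outputs in the comment are typical, not asserted:

```go
package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	for _, v := range []string{"plain", "  leading space", "tab\there", "trailing "} {
		out, err := yaml.Marshal(map[string]string{"v": v})
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}
	// Typically:
	// v: plain
	// v: '  leading space'
	// v: "tab\there"
	// v: 'trailing '
}
```
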
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/encode.go b/e2e/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 00000000000..de9e72a3e63 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for
+// YAML 1.1 parsing.
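+//
+// For example, the Go string "on" is emitted as the double-quoted scalar
+// "on" rather than as a plain scalar, since a YAML 1.1 parser would
+// decode the plain form as the boolean true.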
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
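+	// For example, marshalling a freshly allocated &Node{} produces the
+	// plain scalar "null", the same output as encoding a nil value.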
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only once the entirety of the value is streamed. The last tail is processed
+		// with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
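+			// For example, the single byte 0xff is emitted as !!binary "/w==".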
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/parserc.go b/e2e/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 00000000000..25fe823637a --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? 
(VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
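+// For example, the document "a: 1" is parsed through the states
+// STREAM-START, IMPLICIT-DOCUMENT-START, BLOCK-NODE,
+// BLOCK-MAPPING-FIRST-KEY and BLOCK-MAPPING-VALUE, producing the event
+// sequence STREAM-START, DOCUMENT-START, MAPPING-START, SCALAR ("a"),
+// SCALAR ("1"), MAPPING-END, DOCUMENT-END, STREAM-END.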
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** * +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - ***** * +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
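+//
+// For example (an illustrative prologue; the handle and prefix are the
+// ones used in the YAML spec's %TAG examples):
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2000:app/
+//	---
+//
+// would leave {major: 1, minor: 1} in version_directive_ref and the explicit
+// !e! directive in tag_directives_ref, and would append the default '!' and
+// '!!' directives to parser.tag_directives as well.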
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/readerc.go b/e2e/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 00000000000..56af245366f --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
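+//
+// For example, the scanner requests yaml_parser_update_buffer(parser, 4)
+// before checking for the '--- ' and '... ' document indicators, so on
+// return parser.buffer must hold at least 4 characters (NUL-padded at EOF).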
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length at EOF.
+	// The fact we need to do this is pretty awful, but the description above
+	// implies that to be the case, and there are tests that depend on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to ensure the buffer has a
+		// given length would, in Go, be panicking, or, in C, accessing
+		// invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character. Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				// Char. number range | UTF-8 octet sequence
+				// (hexadecimal) | (binary)
+				// --------------------+------------------------------------
+				// 0000 0000-0000 007F | 0xxxxxxx
+				// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
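+				// For example (illustrative value), if width is 3 but only
+				// the first two bytes of U+20AC (0xE2 0x82 0xAC) are in the
+				// raw buffer, decoding must pause and refill unless the
+				// stream has already hit EOF.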
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
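+					// For example (illustrative code point), U+1F600 arrives
+					// as W1=0xD83D, W2=0xDE00, and the formula below rebuilds
+					// it: 0x10000 + ((0xD83D&0x3FF)<<10) + (0xDE00&0x3FF) =
+					// 0x1F600.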
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/resolve.go b/e2e/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 00000000000..64ae888057a --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
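+		//
+		// For illustration (with an empty tag): the rules below resolve
+		// "10" to !!int, "3.0" and ".5" to !!float, and "2015-02-24" to
+		// !!timestamp, while "yes" and "on" fall through to !!str because
+		// only the true/false spellings appear in resolveMap.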
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
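+	// For illustration, "2001-12-14" matches the date-only format above,
+	// and "2001-12-14T21:59:43.10-05:00" matches the first format.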
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/e2e/vendor/go.yaml.in/yaml/v3/scannerc.go b/e2e/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 00000000000..30b1f08920a
--- /dev/null
+++ b/e2e/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually, there are two aspects of scanning that might be called "clever";
+// the rest is quite straightforward. Those aspects are "block collection
+// start" and "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
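+//
+// For example, while scanning "a: 1" the scanner saves a potential simple
+// key at the position of "a"; when the following ':' indicator is fetched,
+// the saved key is turned into a KEY token ahead of the scalar (see the
+// simple-keys discussion in the introduction above).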
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
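+	// (The flow entry indicator is the single character ',', so one skip()
+	// steps over it.)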
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
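+		// Roughly, for the document "a: 1" this yields the token stream
+		//
+		//   BLOCK-MAPPING-START, KEY, SCALAR("a"), VALUE, SCALAR("1"), BLOCK-END
+		//
+		// (stream tokens omitted), with BLOCK-MAPPING-START spliced in
+		// retroactively at the saved position of the simple key "a".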
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
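+// A plain scalar is the unquoted form, e.g. the "bar" in "foo: bar". Unlike
+// the quoted flow scalars above it has no closing delimiter, so its end is
+// determined by context in yaml_parser_scan_plain_scalar.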
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
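+	// For example, "%YAML 1.2" yields a VERSION-DIRECTIVE token with major=1
+	// and minor=2, while "%TAG !e! tag:example.com,2000:app/" yields a
+	// TAG-DIRECTIVE token carrying that handle and prefix.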
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
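+	// For example, in "!!str foo" the tag token covers "!!str" (handle "!!",
+	// suffix "str") and must be terminated by the blank before "foo".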
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
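+		// e.g. "%21" decodes to '!'; a multi-byte UTF-8 character needs one
+		// "%XX" escape per octet, which yaml_parser_scan_uri_escapes checks.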
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
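+		// That is, both header spellings are accepted: "|2-" (indent, then
+		// chomping) and "|-2" (chomping, then indent) both set increment=2
+		// and chomping=-1.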
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
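+		// Within single quotes the only escape is a doubled quote ('' for a
+		// literal '), while double quotes support the full backslash escape
+		// set handled below.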
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
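+			// This implements line folding for plain scalars: a single break
+			// between two lines becomes one space, while "foo\n\nbar" keeps a
+			// newline, because only the breaks after the first are appended.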
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
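+	// If the scalar ended only after consuming line breaks, the scanner now
+	// sits at the start of a fresh line, where a new simple key may begin.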
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/sorter.go b/e2e/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 00000000000..9210ece7e97 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
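+// It panics for any other kind; callers are expected to have classified the
+// values through keyFloat first.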
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/writerc.go b/e2e/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 00000000000..266d0b092c0 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/yaml.go b/e2e/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 00000000000..0b101cd20db --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+//	https://github.com/yaml/go-yaml
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
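+//
+// Decode returns io.EOF once the input stream is exhausted, so a typical
+// multi-document decoding loop looks like this (illustrative sketch only):
+//
+//	dec := yaml.NewDecoder(r)
+//	for {
++//		var doc map[string]interface{}
+//		if err := dec.Decode(&doc); err == io.EOF {
+//			break
+//		} else if err != nil {
+//			// handle the error
+//		}
+//		// use doc
+//	}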
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty    Only include the field if it's not set to the zero
+//	             value for the type or to empty slices or maps.
+//	             Zero valued structs will be omitted if all their public
+//	             fields are zero, unless they implement an IsZero
+//	             method (see the IsZeroer interface type), in which
+//	             case the field will be excluded if IsZero returns true.
+//
+//	flow         Marshal using a flow style (useful for structs,
+//	             sequences and maps).
+//
+//	inline       Inline the field, which must be a struct or a map,
+//	             causing all of its fields or keys to be processed as if
+//	             they were part of the outer struct. For maps, keys must
+//	             not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
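+//
+// An illustrative sketch of streaming several documents to one writer:
+//
+//	var buf bytes.Buffer
+//	enc := yaml.NewEncoder(&buf)
+//	_ = enc.Encode(map[string]int{"a": 1})
+//	_ = enc.Encode(map[string]int{"b": 2}) // emitted after a "---" separator
+//	_ = enc.Close()                        // flushes the remaining data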
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//	var person struct {
+//		Name    string
+//		Address yaml.Node
+//	}
+//	err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//	var person Node
+//	err := yaml.Unmarshal(data, &person)
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
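+			// A zero Node (var n Node) has Kind 0 and IsZero true, so it
+			// resolves to the null tag below and encodes as null.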
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/e2e/vendor/go.yaml.in/yaml/v3/yamlh.go b/e2e/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 00000000000..f59aa40f640 --- /dev/null +++ b/e2e/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the 
Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT // A STREAM-START event.
+	yaml_STREAM_END_EVENT   // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. 
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	newlines int // The number of line breaks since last non-break/non-blank character
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yaml_emitter_set_output(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	space_above bool // Is there an empty line above?
+	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/e2e/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/e2e/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 00000000000..dea1ba9610d
--- /dev/null
+++ b/e2e/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
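+// (The scanner relies on this set when reading directive names, anchor and
+// alias names, and tag handles.)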
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
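+// Like is_breakz above, the disjuncts are written out inline (see the
+// commented-out form in the body) so the function stays cheap on the
+// scanner's hot path; compare the inlining note on width below.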
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/e2e/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/e2e/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index bc44b2c8e7e..a703cdfcf90 100644 --- a/e2e/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/e2e/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -85,6 +85,7 @@ type event struct { // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). // Type can be recovered from the sole bit in typ. +// [Tried this, wasn't faster. --adonovan] // Preorder visits all the nodes of the files supplied to New in // depth-first order. It calls f(n) for each node n before it visits diff --git a/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index be0f990a295..9852331a3db 100644 --- a/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/e2e/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - _ "unsafe" ) const ( diff --git a/e2e/vendor/google.golang.org/grpc/MAINTAINERS.md b/e2e/vendor/google.golang.org/grpc/MAINTAINERS.md index 5d4096d46a0..df35bb9a882 100644 --- a/e2e/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/e2e/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,19 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) -- [aranjans](https://github.com/aranjans), Google LLC - [arjan-bal](https://github.com/arjan-bal), Google LLC - [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. 
- [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. - [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 0ad6bb1f220..360db08ebc1 100644 --- a/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/e2e/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +var randIntN = rand.IntN + // ChildState is the balancer state of a child along with the endpoint which // identifies the child balancer. type ChildState struct { @@ -112,6 +114,21 @@ type endpointSharding struct { mu sync.Mutex } +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount. +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + // UpdateClientConnState creates a child for new endpoints and deletes children // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at @@ -133,7 +150,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. - for _, endpoint := range state.ResolverState.Endpoints { + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. 
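
The new rotateEndpoints helper keeps the relative endpoint order and only varies the starting offset, so every channel walks the same list from a random position instead of always favoring the first endpoint. A minimal sketch of the same two-copy rotation, with the random source injected the way the package injects randIntN (the rotate helper and its string slice are illustrative only, not part of gRPC):

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    // rotate mirrors the two-copy rotation in rotateEndpoints, on strings
    // for brevity; intN is injected like the package-level randIntN.
    func rotate(es []string, intN func(int) int) []string {
        les := len(es)
        if les == 0 {
            return es
        }
        r := intN(les)
        ret := make([]string, les)
        copy(ret, es[r:])         // tail of the slice moves to the front
        copy(ret[les-r:], es[:r]) // head wraps around to the back
        return ret
    }

    func main() {
        src := rand.New(rand.NewPCG(1, 2)) // seeded only for deterministic output
        fmt.Println(rotate([]string{"a", "b", "c", "d"}, src.IntN))
        // prints [c d a b] when the drawn offset is 2
    }
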
@@ -279,7 +296,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.IntN(len(pickers))), + next: uint32(randIntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, diff --git a/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index e62047256af..67f315a0dbc 100644 --- a/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/e2e/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -67,21 +67,21 @@ var ( disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.disconnections", Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", + Unit: "{disconnection}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_succeeded", Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_failed", Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) diff --git a/e2e/vendor/google.golang.org/grpc/clientconn.go b/e2e/vendor/google.golang.org/grpc/clientconn.go index cd3eaf8ddcb..3f762285db7 100644 --- a/e2e/vendor/google.golang.org/grpc/clientconn.go +++ b/e2e/vendor/google.golang.org/grpc/clientconn.go @@ -208,7 +208,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) @@ -1076,13 +1076,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. 
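
With newPickerWrapper() no longer carrying the stats handlers, blocked-pick notifications move out of the picker wrapper and into the RPC attempt, which emits the stats.DelayedPickComplete event introduced further down in this update. A sketch of a client-side stats.Handler that observes it (pickLogger is a hypothetical type, not part of gRPC):

    package example

    import (
        "context"
        "log"

        "google.golang.org/grpc/stats"
    )

    // pickLogger is a hypothetical stats.Handler that reports only RPCs
    // whose connection pick had to wait for a picker update.
    type pickLogger struct{}

    func (pickLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

    func (pickLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
        if _, ok := s.(*stats.DelayedPickComplete); ok {
            log.Println("RPC proceeded after a delayed connection pick")
        }
    }

    func (pickLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

    func (pickLogger) HandleConn(context.Context, stats.ConnStats) {}

Such a handler would be registered with grpc.WithStatsHandler(pickLogger{}) when creating the client; as of this gRPC release, DelayedPickComplete replaces stats.PickerUpdated, which remains as a deprecated alias.
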
@@ -1831,7 +1824,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/e2e/vendor/google.golang.org/grpc/credentials/credentials.go b/e2e/vendor/google.golang.org/grpc/credentials/credentials.go index a63ab606e66..c8e337cdda0 100644 --- a/e2e/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/e2e/vendor/google.golang.org/grpc/credentials/credentials.go @@ -96,10 +96,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +110,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. ServerName string } @@ -173,12 +183,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. 
OverrideServerName(string) error } diff --git a/e2e/vendor/google.golang.org/grpc/credentials/tls.go b/e2e/vendor/google.golang.org/grpc/credentials/tls.go index 20f65f7bd95..8277be7d6f8 100644 --- a/e2e/vendor/google.golang.org/grpc/credentials/tls.go +++ b/e2e/vendor/google.golang.org/grpc/credentials/tls.go @@ -110,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -259,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -271,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/e2e/vendor/google.golang.org/grpc/dialoptions.go b/e2e/vendor/google.golang.org/grpc/dialoptions.go index ec0ca89ccdc..7a5ac2e7c49 100644 --- a/e2e/vendor/google.golang.org/grpc/dialoptions.go +++ b/e2e/vendor/google.golang.org/grpc/dialoptions.go @@ -608,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. 
+// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 2fdaed88dbd..7e060f5ed13 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/e2e/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,26 +26,32 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). + EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. If this is unset or is false, only the first - // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used // instead of the exiting pickfirst implementation. This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" diff --git a/e2e/vendor/google.golang.org/grpc/internal/internal.go b/e2e/vendor/google.golang.org/grpc/internal/internal.go index 3ac798e8e60..2699223a27f 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/internal.go +++ b/e2e/vendor/google.golang.org/grpc/internal/internal.go @@ -182,35 +182,6 @@ var ( // other features, including the CSDS service. NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. - // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. 
This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). - // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() - - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. - // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). - // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) diff --git a/e2e/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/e2e/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95d0d..ada5251cff3 100644 --- a/e2e/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/e2e/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -181,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil diff --git a/e2e/vendor/google.golang.org/grpc/picker_wrapper.go b/e2e/vendor/google.golang.org/grpc/picker_wrapper.go index a2d2a798d48..aa52bfe95fd 100644 --- a/e2e/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/e2e/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. 
- pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/e2e/vendor/google.golang.org/grpc/resolver/resolver.go b/e2e/vendor/google.golang.org/grpc/resolver/resolver.go index b84ef26d46d..8e6af9514b6 100644 --- a/e2e/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/e2e/vendor/google.golang.org/grpc/resolver/resolver.go @@ -332,6 +332,11 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. OverrideAuthority(Target) string } diff --git a/e2e/vendor/google.golang.org/grpc/server.go b/e2e/vendor/google.golang.org/grpc/server.go index 70fe23f5502..1da2a542acd 100644 --- a/e2e/vendor/google.golang.org/grpc/server.go +++ b/e2e/vendor/google.golang.org/grpc/server.go @@ -1598,6 +1598,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, diff --git a/e2e/vendor/google.golang.org/grpc/stats/stats.go b/e2e/vendor/google.golang.org/grpc/stats/stats.go index baf7740efba..10bf998aa5b 100644 --- a/e2e/vendor/google.golang.org/grpc/stats/stats.go +++ b/e2e/vendor/google.golang.org/grpc/stats/stats.go @@ -64,15 +64,21 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } +// IsClient indicates DelayedPickComplete is available on the client. +func (*DelayedPickComplete) IsClient() bool { return true } -func (*PickerUpdated) isRPCStats() {} +func (*DelayedPickComplete) isRPCStats() {} + +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete // InPayload contains stats about an incoming payload. 
type InPayload struct { diff --git a/e2e/vendor/google.golang.org/grpc/stream.go b/e2e/vendor/google.golang.org/grpc/stream.go index ca6948926f9..d9bbd4c57cf 100644 --- a/e2e/vendor/google.golang.org/grpc/stream.go +++ b/e2e/vendor/google.golang.org/grpc/stream.go @@ -469,8 +469,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -481,6 +482,11 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } + if pick.blocked { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) + } + } return nil } @@ -1580,6 +1586,7 @@ type serverStream struct { s *transport.ServerStream p *parser codec baseCodec + desc *StreamDesc compressorV0 Compressor compressorV1 encoding.Compressor @@ -1588,6 +1595,8 @@ type serverStream struct { sendCompressorName string + recvFirstMsg bool // set after the first message is received + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1774,6 +1783,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs. + if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { @@ -1781,6 +1794,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } return toRPCErr(err) } + ss.recvFirstMsg = true if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ @@ -1800,7 +1814,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. diff --git a/e2e/vendor/google.golang.org/grpc/version.go b/e2e/vendor/google.golang.org/grpc/version.go index 8b0e5f973d6..bc1eb290f69 100644 --- a/e2e/vendor/google.golang.org/grpc/version.go +++ b/e2e/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.74.2" +const Version = "1.75.0" diff --git a/e2e/vendor/modules.txt b/e2e/vendor/modules.txt index d489e322d47..7cb1b9609ae 100644 --- a/e2e/vendor/modules.txt +++ b/e2e/vendor/modules.txt @@ -4,6 +4,9 @@ cel.dev/expr # github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab ## explicit github.com/JeffAshton/win_pdh +# github.com/Masterminds/semver/v3 v3.4.0 +## explicit; go 1.21 +github.com/Masterminds/semver/v3 # github.com/Microsoft/go-winio v0.6.2 ## explicit; go 1.21 github.com/Microsoft/go-winio @@ -266,7 +269,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.23.4 +# github.com/onsi/ginkgo/v2 v2.25.2 ## explicit; go 1.23.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -288,7 +291,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.38.0 +# github.com/onsi/gomega v1.38.2 ## explicit; go 1.23.0 github.com/onsi/gomega github.com/onsi/gomega/format @@ -383,7 +386,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.36.0 +# go.opentelemetry.io/otel v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -396,6 +399,7 @@ go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.26.0 +go.opentelemetry.io/otel/semconv/v1.34.0 # go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 ## explicit; go 1.22.7 go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -407,12 +411,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.36.0 +# go.opentelemetry.io/otel/metric v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.36.0 +# go.opentelemetry.io/otel/sdk v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -420,7 +424,7 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.36.0 +# go.opentelemetry.io/otel/trace v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -438,6 +442,9 @@ go.uber.org/automaxprocs go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs +# go.yaml.in/yaml/v3 v3.0.4 +## explicit; go 1.16 +go.yaml.in/yaml/v3 # golang.org/x/crypto v0.41.0 ## explicit; go 1.23.0 golang.org/x/crypto/blowfish @@ -517,20 +524,20 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# 
golang.org/x/tools v0.35.0 +# golang.org/x/tools v0.36.0 ## explicit; go 1.23.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/edge golang.org/x/tools/go/ast/inspector -# google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a +# google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.74.2 +# google.golang.org/grpc v1.75.0 ## explicit; go 1.23.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -710,7 +717,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.33.1 => k8s.io/apiextensions-apiserver v0.33.2 +# k8s.io/apiextensions-apiserver v0.33.4 => k8s.io/apiextensions-apiserver v0.33.2 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -1246,7 +1253,7 @@ k8s.io/cri-client/pkg k8s.io/cri-client/pkg/internal k8s.io/cri-client/pkg/logs k8s.io/cri-client/pkg/util -# k8s.io/csi-translation-lib v0.33.1 => k8s.io/csi-translation-lib v0.33.2 +# k8s.io/csi-translation-lib v0.33.4 => k8s.io/csi-translation-lib v0.33.2 ## explicit; go 1.24.0 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins @@ -1462,7 +1469,7 @@ k8s.io/kubernetes/test/utils/kubeconfig k8s.io/kubernetes/third_party/forked/golang/expansion k8s.io/kubernetes/third_party/forked/libcontainer/apparmor k8s.io/kubernetes/third_party/forked/libcontainer/utils -# k8s.io/mount-utils v0.33.3 +# k8s.io/mount-utils v0.33.4 ## explicit; go 1.24.0 k8s.io/mount-utils # k8s.io/pod-security-admission v0.33.4 From e4addb4b3b64cbca779c7720c170c153601cefda Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Mon, 18 Aug 2025 10:24:33 +0200 Subject: [PATCH 048/157] rbd: use github.com/avast/retry instead of kubernetes/wait Reduce the dependency on Kubernetes libraries, use a simpler module for retrying functions. 
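
The call sites shrink accordingly: retry-go's defaults (100ms initial delay,
exponential backoff, at most 10 attempts) replace the hand-rolled wait.Backoff
values. A sketch of the new call-site shape, not the exact ceph-csi code,
where probe stands in for the real readiness check:

    import (
        "errors"

        "github.com/avast/retry-go/v4"
    )

    // waitFor retries probe with the retry-go defaults until it reports
    // true; probe is a placeholder for the real readiness check.
    func waitFor(probe func() bool) error {
        return retry.Do(func() error {
            if !probe() {
                return errors.New("condition not met yet")
            }
            return nil
        })
    }
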
Signed-off-by: Niels de Vos --- go.mod | 1 + go.sum | 2 + internal/rbd/rbd_attach.go | 76 +-- internal/rbd/rbd_util.go | 13 +- .../github.com/avast/retry-go/v4/.gitignore | 21 + .../avast/retry-go/v4/.godocdown.tmpl | 38 ++ vendor/github.com/avast/retry-go/v4/LICENSE | 21 + vendor/github.com/avast/retry-go/v4/Makefile | 59 +++ vendor/github.com/avast/retry-go/v4/README.md | 494 ++++++++++++++++++ vendor/github.com/avast/retry-go/v4/VERSION | 1 + .../github.com/avast/retry-go/v4/current.txt | 26 + .../github.com/avast/retry-go/v4/generic.txt | 46 ++ .../github.com/avast/retry-go/v4/options.go | 280 ++++++++++ vendor/github.com/avast/retry-go/v4/retry.go | 347 ++++++++++++ vendor/modules.txt | 3 + 15 files changed, 1384 insertions(+), 44 deletions(-) create mode 100644 vendor/github.com/avast/retry-go/v4/.gitignore create mode 100644 vendor/github.com/avast/retry-go/v4/.godocdown.tmpl create mode 100644 vendor/github.com/avast/retry-go/v4/LICENSE create mode 100644 vendor/github.com/avast/retry-go/v4/Makefile create mode 100644 vendor/github.com/avast/retry-go/v4/README.md create mode 100644 vendor/github.com/avast/retry-go/v4/VERSION create mode 100644 vendor/github.com/avast/retry-go/v4/current.txt create mode 100644 vendor/github.com/avast/retry-go/v4/generic.txt create mode 100644 vendor/github.com/avast/retry-go/v4/options.go create mode 100644 vendor/github.com/avast/retry-go/v4/retry.go diff --git a/go.mod b/go.mod index e0c75635a80..0a563dbb054 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0 github.com/IBM/keyprotect-go-client v0.15.1 + github.com/avast/retry-go/v4 v4.6.1 github.com/aws/aws-sdk-go v1.55.8 github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 diff --git a/go.sum b/go.sum index 37428f0c1ef..839cbbde020 100644 --- a/go.sum +++ b/go.sum @@ -103,6 +103,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= +github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go index a2c76ebab7f..c7001987238 100644 --- a/internal/rbd/rbd_attach.go +++ b/internal/rbd/rbd_attach.go @@ -23,15 +23,14 @@ import ( "os" "slices" "strings" - "time" "github.com/ceph/ceph-csi/pkg/util/kernel" "github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util/log" + "github.com/avast/retry-go/v4" "github.com/container-storage-interface/spec/lib/go/csi" - "k8s.io/apimachinery/pkg/util/wait" ) const ( @@ -211,19 +210,30 @@ func findDeviceMappingImage(ctx context.Context, pool, namespace, image string, } // Stat a path, if it doesn't exist, retry maxRetries times. 
-func waitForPath(ctx context.Context, pool, namespace, image string, maxRetries int, useNbdDriver bool) (string, bool) { - for i := range maxRetries { - if i != 0 { - time.Sleep(time.Second) - } +func waitForPath( + ctx context.Context, + pool, namespace, image string, + maxRetries uint, + useNbdDriver bool, +) (string, bool) { + devicePath, err := retry.DoWithData( + func() (string, error) { + device, found := findDeviceMappingImage(ctx, pool, namespace, image, useNbdDriver) + if found { + return device, nil + } - device, found := findDeviceMappingImage(ctx, pool, namespace, image, useNbdDriver) - if found { - return device, found - } + return "", fmt.Errorf("failed to find device for %s/%s/%s", pool, namespace, image) + }, + retry.Attempts(maxRetries), + ) + if err != nil { + log.ErrorLog(ctx, err.Error()) + + return "", false } - return "", false + return devicePath, true } // SetRbdNbdToolFeatures sets features available with rbd-nbd, and NBD module @@ -349,13 +359,7 @@ func attachRBDImage(ctx context.Context, volOptions *rbdVolume, device string, c devicePath, found := waitForPath(ctx, volOptions.Pool, volOptions.RadosNamespace, image, 1, useNBD) if !found { - backoff := wait.Backoff{ - Duration: rbdImageWatcherInitDelay, - Factor: rbdImageWatcherFactor, - Steps: rbdImageWatcherSteps, - } - - err = waitForrbdImage(ctx, backoff, volOptions) + err = waitForrbdImage(ctx, volOptions) if err != nil { return "", err } @@ -511,26 +515,28 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util. return devicePath, nil } -func waitForrbdImage(ctx context.Context, backoff wait.Backoff, volOptions *rbdVolume) error { +func waitForrbdImage(ctx context.Context, volOptions *rbdVolume) error { imagePath := volOptions.String() - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - used, err := volOptions.isInUse() - if err != nil { - return false, fmt.Errorf("fail to check rbd image status: (%w)", err) - } - if (volOptions.DisableInUseChecks) && (used) { - log.UsefulLog(ctx, "valid multi-node attach requested, ignoring watcher in-use result") + err := retry.Do( + func() error { + used, err := volOptions.isInUse() + if err != nil { + return fmt.Errorf("fail to check rbd image status: (%w)", err) + } + if (volOptions.DisableInUseChecks) && (used) { + log.UsefulLog(ctx, "valid multi-node attach requested, ignoring watcher in-use result") - return used, nil - } + return nil + } else if used { + return fmt.Errorf("rbd image %s is still being used", imagePath) + } + + return nil + }, + // retry.BackOffDelay() is the default, initial delay 100ms, max 10 attempts + ) - return !used, nil - }) - // return error if rbd image has not become available for the specified timeout - if wait.Interrupted(err) { - return fmt.Errorf("rbd image %s is still being used", imagePath) - } // return error if any other errors were encountered during waiting for the image to become available return err } diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index c7d86766862..dbfa1b3d6d6 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -47,15 +47,10 @@ import ( ) const ( - // The following three values are used for 30 seconds timeout - // while waiting for RBD Watcher to expire. 
- rbdImageWatcherInitDelay = 1 * time.Second - rbdImageWatcherFactor = 1.4 - rbdImageWatcherSteps = 10 - rbdDefaultMounter = "rbd" - rbdNbdMounter = "rbd-nbd" - defaultLogDir = "/var/log/ceph" - defaultLogStrategy = "remove" // supports remove, compress and preserve + rbdDefaultMounter = "rbd" + rbdNbdMounter = "rbd-nbd" + defaultLogDir = "/var/log/ceph" + defaultLogStrategy = "remove" // supports remove, compress and preserve // Output strings returned during invocation of "ceph rbd task add remove " when // command is not supported by ceph manager. Used to check errors and recover when the command diff --git a/vendor/github.com/avast/retry-go/v4/.gitignore b/vendor/github.com/avast/retry-go/v4/.gitignore new file mode 100644 index 00000000000..c40eb23f950 --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# dep +vendor/ +Gopkg.lock + +# cover +coverage.txt diff --git a/vendor/github.com/avast/retry-go/v4/.godocdown.tmpl b/vendor/github.com/avast/retry-go/v4/.godocdown.tmpl new file mode 100644 index 00000000000..32b80df81b1 --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/.godocdown.tmpl @@ -0,0 +1,38 @@ +# {{ .Name }} + +[![Release](https://img.shields.io/github/release/avast/retry-go.svg?style=flat-square)](https://github.com/avast/retry-go/releases/latest) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE.md) +![GitHub Actions](https://github.com/avast/retry-go/actions/workflows/workflow.yaml/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/github.com/avast/retry-go?style=flat-square)](https://goreportcard.com/report/github.com/avast/retry-go) +[![Go Reference](https://pkg.go.dev/badge/github.com/avast/retry-go/v4.svg)](https://pkg.go.dev/github.com/avast/retry-go/v4) +[![codecov.io](https://codecov.io/github/avast/retry-go/coverage.svg?branch=master)](https://codecov.io/github/avast/retry-go?branch=master) +[![Sourcegraph](https://sourcegraph.com/github.com/avast/retry-go/-/badge.svg)](https://sourcegraph.com/github.com/avast/retry-go?badge) + +{{ .EmitSynopsis }} + +{{ .EmitUsage }} + +## Contributing + +Contributions are very much welcome. + +### Makefile + +Makefile provides several handy rules, like README.md `generator` , `setup` for prepare build/dev environment, `test`, `cover`, etc... + +Try `make help` for more information. + +### Before pull request + +> maybe you need `make setup` in order to setup environment + +please try: +* run tests (`make test`) +* run linter (`make lint`) +* if your IDE don't automaticaly do `go fmt`, run `go fmt` (`make fmt`) + +### README + +README.md are generate from template [.godocdown.tmpl](.godocdown.tmpl) and code documentation via [godocdown](https://github.com/robertkrimen/godocdown). + +Never edit README.md direct, because your change will be lost. 
diff --git a/vendor/github.com/avast/retry-go/v4/LICENSE b/vendor/github.com/avast/retry-go/v4/LICENSE new file mode 100644 index 00000000000..f63fca814f9 --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Avast + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/avast/retry-go/v4/Makefile b/vendor/github.com/avast/retry-go/v4/Makefile new file mode 100644 index 00000000000..86544d239a3 --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/Makefile @@ -0,0 +1,59 @@ +SOURCE_FILES?=$$(go list ./... | grep -v /vendor/) +TEST_PATTERN?=. +TEST_OPTIONS?= +VERSION?=$$(cat VERSION) +LINTER?=$$(which golangci-lint) +LINTER_VERSION=1.50.0 + +ifeq ($(OS),Windows_NT) + LINTER_FILE=golangci-lint-$(LINTER_VERSION)-windows-amd64.zip + LINTER_UNPACK= >| app.zip; unzip -j app.zip -d $$GOPATH/bin; rm app.zip +else ifeq ($(OS), Darwin) + LINTER_FILE=golangci-lint-$(LINTER_VERSION)-darwin-amd64.tar.gz + LINTER_UNPACK= | tar xzf - -C $$GOPATH/bin --wildcards --strip 1 "**/golangci-lint" +else + LINTER_FILE=golangci-lint-$(LINTER_VERSION)-linux-amd64.tar.gz + LINTER_UNPACK= | tar xzf - -C $$GOPATH/bin --wildcards --strip 1 "**/golangci-lint" +endif + +setup: + go install github.com/pierrre/gotestcover@latest + go install golang.org/x/tools/cmd/cover@latest + go install github.com/robertkrimen/godocdown/godocdown@latest + go mod download + +generate: ## Generate README.md + godocdown >| README.md + +test: generate test_and_cover_report lint + +test_and_cover_report: + gotestcover $(TEST_OPTIONS) -covermode=atomic -coverprofile=coverage.txt $(SOURCE_FILES) -run $(TEST_PATTERN) -timeout=2m + +cover: test ## Run all the tests and opens the coverage report + go tool cover -html=coverage.txt + +fmt: ## gofmt and goimports all go files + find . -name '*.go' -not -wholename './vendor/*' | while read -r file; do gofmt -w -s "$$file"; goimports -w "$$file"; done + +lint: ## Run all the linters + @if [ "$(LINTER)" = "" ]; then\ + curl -L https://github.com/golangci/golangci-lint/releases/download/v$(LINTER_VERSION)/$(LINTER_FILE) $(LINTER_UNPACK) ;\ + chmod +x $$GOPATH/bin/golangci-lint;\ + fi + + golangci-lint run + +ci: test_and_cover_report ## Run all the tests but no linters - use https://golangci.com integration instead + +build: + go build + +release: ## Release new version + git tag | grep -q $(VERSION) && echo This version was released! Increase VERSION! 
|| git tag $(VERSION) && git push origin $(VERSION) && git tag v$(VERSION) && git push origin v$(VERSION) + +# Absolutely awesome: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html +help: + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.DEFAULT_GOAL := build diff --git a/vendor/github.com/avast/retry-go/v4/README.md b/vendor/github.com/avast/retry-go/v4/README.md new file mode 100644 index 00000000000..b7303433953 --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/README.md @@ -0,0 +1,494 @@ +# retry + +[![Release](https://img.shields.io/github/release/avast/retry-go.svg?style=flat-square)](https://github.com/avast/retry-go/releases/latest) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square)](LICENSE.md) +![GitHub Actions](https://github.com/avast/retry-go/actions/workflows/workflow.yaml/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/github.com/avast/retry-go?style=flat-square)](https://goreportcard.com/report/github.com/avast/retry-go) +[![Go Reference](https://pkg.go.dev/badge/github.com/avast/retry-go/v4.svg)](https://pkg.go.dev/github.com/avast/retry-go/v4) +[![codecov.io](https://codecov.io/github/avast/retry-go/coverage.svg?branch=master)](https://codecov.io/github/avast/retry-go?branch=master) +[![Sourcegraph](https://sourcegraph.com/github.com/avast/retry-go/-/badge.svg)](https://sourcegraph.com/github.com/avast/retry-go?badge) + +Simple library for retry mechanism + +Slightly inspired by +[Try::Tiny::Retry](https://metacpan.org/pod/Try::Tiny::Retry) + +# SYNOPSIS + +HTTP GET with retry: + + url := "http://example.com" + var body []byte + + err := retry.Do( + func() error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + return nil + }, + ) + + if err != nil { + // handle error + } + + fmt.Println(string(body)) + +HTTP GET with retry with data: + + url := "http://example.com" + + body, err := retry.DoWithData( + func() ([]byte, error) { + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return body, nil + }, + ) + + if err != nil { + // handle error + } + + fmt.Println(string(body)) + +[More examples](https://github.com/avast/retry-go/tree/master/examples) + +# SEE ALSO + +* [giantswarm/retry-go](https://github.com/giantswarm/retry-go) - slightly +complicated interface. + +* [sethgrid/pester](https://github.com/sethgrid/pester) - only http retry for +http calls with retries and backoff + +* [cenkalti/backoff](https://github.com/cenkalti/backoff) - Go port of the +exponential backoff algorithm from Google's HTTP Client Library for Java. Really +complicated interface. + +* [rafaeljesus/retry-go](https://github.com/rafaeljesus/retry-go) - looks good, +slightly similar as this package, don't have 'simple' `Retry` method + +* [matryer/try](https://github.com/matryer/try) - very popular package, +nonintuitive interface (for me) + +# BREAKING CHANGES + +* 4.0.0 + + - infinity retry is possible by set `Attempts(0)` by PR [#49](https://github.com/avast/retry-go/pull/49) + +* 3.0.0 + + - `DelayTypeFunc` accepts a new parameter `err` - this breaking change affects only your custom Delay Functions. 
This change allow [make delay functions based on error](examples/delay_based_on_error_test.go). + +* 1.0.2 -> 2.0.0 + + - argument of `retry.Delay` is final delay (no multiplication by `retry.Units` anymore) + - function `retry.Units` are removed + - [more about this breaking change](https://github.com/avast/retry-go/issues/7) + +* 0.3.0 -> 1.0.0 + + - `retry.Retry` function are changed to `retry.Do` function + - `retry.RetryCustom` (OnRetry) and `retry.RetryCustomWithOpts` functions are now implement via functions produces Options (aka `retry.OnRetry`) + +## Usage + +#### func BackOffDelay + +```go +func BackOffDelay(n uint, _ error, config *Config) time.Duration +``` +BackOffDelay is a DelayType which increases delay between consecutive retries + +#### func Do + +```go +func Do(retryableFunc RetryableFunc, opts ...Option) error +``` + +#### func DoWithData + +```go +func DoWithData[T any](retryableFunc RetryableFuncWithData[T], opts ...Option) (T, error) +``` + +#### func FixedDelay + +```go +func FixedDelay(_ uint, _ error, config *Config) time.Duration +``` +FixedDelay is a DelayType which keeps delay the same through all iterations + +#### func IsRecoverable + +```go +func IsRecoverable(err error) bool +``` +IsRecoverable checks if error is an instance of `unrecoverableError` + +#### func RandomDelay + +```go +func RandomDelay(_ uint, _ error, config *Config) time.Duration +``` +RandomDelay is a DelayType which picks a random delay up to config.maxJitter + +#### func Unrecoverable + +```go +func Unrecoverable(err error) error +``` +Unrecoverable wraps an error in `unrecoverableError` struct + +#### type Config + +```go +type Config struct { +} +``` + + +#### type DelayTypeFunc + +```go +type DelayTypeFunc func(n uint, err error, config *Config) time.Duration +``` + +DelayTypeFunc is called to return the next delay to wait after the retriable +function fails on `err` after `n` attempts. + +#### func CombineDelay + +```go +func CombineDelay(delays ...DelayTypeFunc) DelayTypeFunc +``` +CombineDelay is a DelayType the combines all of the specified delays into a new +DelayTypeFunc + +#### type Error + +```go +type Error []error +``` + +Error type represents list of errors in retry + +#### func (Error) As + +```go +func (e Error) As(target interface{}) bool +``` + +#### func (Error) Error + +```go +func (e Error) Error() string +``` +Error method return string representation of Error It is an implementation of +error interface + +#### func (Error) Is + +```go +func (e Error) Is(target error) bool +``` + +#### func (Error) Unwrap + +```go +func (e Error) Unwrap() error +``` +Unwrap the last error for compatibility with `errors.Unwrap()`. When you need to +unwrap all errors, you should use `WrappedErrors()` instead. + + err := Do( + func() error { + return errors.New("original error") + }, + Attempts(1), + ) + + fmt.Println(errors.Unwrap(err)) # "original error" is printed + +Added in version 4.2.0. + +#### func (Error) WrappedErrors + +```go +func (e Error) WrappedErrors() []error +``` +WrappedErrors returns the list of errors that this Error is wrapping. It is an +implementation of the `errwrap.Wrapper` interface in package +[errwrap](https://github.com/hashicorp/errwrap) so that `retry.Error` can be +used with that library. + +#### type OnRetryFunc + +```go +type OnRetryFunc func(attempt uint, err error) +``` + +Function signature of OnRetry function + +#### type Option + +```go +type Option func(*Config) +``` + +Option represents an option for retry. 
+ +#### func Attempts + +```go +func Attempts(attempts uint) Option +``` +Attempts set count of retry. Setting to 0 will retry until the retried function +succeeds. default is 10 + +#### func AttemptsForError + +```go +func AttemptsForError(attempts uint, err error) Option +``` +AttemptsForError sets count of retry in case execution results in given `err` +Retries for the given `err` are also counted against total retries. The retry +will stop if any of given retries is exhausted. + +added in 4.3.0 + +#### func Context + +```go +func Context(ctx context.Context) Option +``` +Context allow to set context of retry default are Background context + +example of immediately cancellation (maybe it isn't the best example, but it +describes behavior enough; I hope) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + retry.Do( + func() error { + ... + }, + retry.Context(ctx), + ) + +#### func Delay + +```go +func Delay(delay time.Duration) Option +``` +Delay set delay between retry default is 100ms + +#### func DelayType + +```go +func DelayType(delayType DelayTypeFunc) Option +``` +DelayType set type of the delay between retries default is BackOff + +#### func LastErrorOnly + +```go +func LastErrorOnly(lastErrorOnly bool) Option +``` +return the direct last error that came from the retried function default is +false (return wrapped errors with everything) + +#### func MaxDelay + +```go +func MaxDelay(maxDelay time.Duration) Option +``` +MaxDelay set maximum delay between retry does not apply by default + +#### func MaxJitter + +```go +func MaxJitter(maxJitter time.Duration) Option +``` +MaxJitter sets the maximum random Jitter between retries for RandomDelay + +#### func OnRetry + +```go +func OnRetry(onRetry OnRetryFunc) Option +``` +OnRetry function callback are called each retry + +log each retry example: + + retry.Do( + func() error { + return errors.New("some error") + }, + retry.OnRetry(func(n uint, err error) { + log.Printf("#%d: %s\n", n, err) + }), + ) + +#### func RetryIf + +```go +func RetryIf(retryIf RetryIfFunc) Option +``` +RetryIf controls whether a retry should be attempted after an error (assuming +there are any retry attempts remaining) + +skip retry if special error example: + + retry.Do( + func() error { + return errors.New("special error") + }, + retry.RetryIf(func(err error) bool { + if err.Error() == "special error" { + return false + } + return true + }) + ) + +By default RetryIf stops execution if the error is wrapped using +`retry.Unrecoverable`, so above example may also be shortened to: + + retry.Do( + func() error { + return retry.Unrecoverable(errors.New("special error")) + } + ) + +#### func UntilSucceeded + +```go +func UntilSucceeded() Option +``` +UntilSucceeded will retry until the retried function succeeds. Equivalent to +setting Attempts(0). + +#### func WithTimer + +```go +func WithTimer(t Timer) Option +``` +WithTimer provides a way to swap out timer module implementations. This +primarily is useful for mocking/testing, where you may not want to explicitly +wait for a set duration for retries. + +example of augmenting time.After with a print statement + + type struct MyTimer {} + + func (t *MyTimer) After(d time.Duration) <- chan time.Time { + fmt.Print("Timer called!") + return time.After(d) + } + + retry.Do( + func() error { ... 
}, + retry.WithTimer(&MyTimer{}) + ) + +#### func WrapContextErrorWithLastError + +```go +func WrapContextErrorWithLastError(wrapContextErrorWithLastError bool) Option +``` +WrapContextErrorWithLastError allows the context error to be returned wrapped +with the last error that the retried function returned. This is only applicable +when Attempts is set to 0 to retry indefinitly and when using a context to +cancel / timeout + +default is false + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + retry.Do( + func() error { + ... + }, + retry.Context(ctx), + retry.Attempts(0), + retry.WrapContextErrorWithLastError(true), + ) + +#### type RetryIfFunc + +```go +type RetryIfFunc func(error) bool +``` + +Function signature of retry if function + +#### type RetryableFunc + +```go +type RetryableFunc func() error +``` + +Function signature of retryable function + +#### type RetryableFuncWithData + +```go +type RetryableFuncWithData[T any] func() (T, error) +``` + +Function signature of retryable function with data + +#### type Timer + +```go +type Timer interface { + After(time.Duration) <-chan time.Time +} +``` + +Timer represents the timer used to track time for a retry. + +## Contributing + +Contributions are very much welcome. + +### Makefile + +Makefile provides several handy rules, like README.md `generator` , `setup` for prepare build/dev environment, `test`, `cover`, etc... + +Try `make help` for more information. + +### Before pull request + +> maybe you need `make setup` in order to setup environment + +please try: +* run tests (`make test`) +* run linter (`make lint`) +* if your IDE don't automaticaly do `go fmt`, run `go fmt` (`make fmt`) + +### README + +README.md are generate from template [.godocdown.tmpl](.godocdown.tmpl) and code documentation via [godocdown](https://github.com/robertkrimen/godocdown). + +Never edit README.md direct, because your change will be lost. 
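+As a companion to the `WithTimer` example above, a small illustrative sketch of
+a fake timer that fires immediately, so tests exercising retries do not sleep
+(`fakeTimer` is our own name here, not part of the library):
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/avast/retry-go/v4"
+)
+
+// fakeTimer satisfies retry.Timer but never actually waits.
+type fakeTimer struct{}
+
+func (fakeTimer) After(d time.Duration) <-chan time.Time {
+	ch := make(chan time.Time, 1)
+	ch <- time.Now() // fire immediately instead of after d
+
+	return ch
+}
+
+func main() {
+	calls := 0
+	err := retry.Do(
+		func() error {
+			calls++
+			if calls < 3 {
+				return errors.New("transient")
+			}
+
+			return nil
+		},
+		retry.Attempts(5),
+		retry.WithTimer(fakeTimer{}),
+	)
+
+	fmt.Println(err, calls) // <nil> 3
+}
+```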
diff --git a/vendor/github.com/avast/retry-go/v4/VERSION b/vendor/github.com/avast/retry-go/v4/VERSION new file mode 100644 index 00000000000..8ac28bf9f0f --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/VERSION @@ -0,0 +1 @@ +4.6.1 diff --git a/vendor/github.com/avast/retry-go/v4/current.txt b/vendor/github.com/avast/retry-go/v4/current.txt new file mode 100644 index 00000000000..406b14fe8e4 --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/current.txt @@ -0,0 +1,26 @@ +goos: darwin +goarch: amd64 +pkg: github.com/avast/retry-go/v4 +cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz +BenchmarkDo-16 3 474128987 ns/op 2730 B/op 48 allocs/op +BenchmarkDo-16 3 441499631 ns/op 2725 B/op 47 allocs/op +BenchmarkDo-16 3 449390845 ns/op 2693 B/op 47 allocs/op +BenchmarkDo-16 3 488695333 ns/op 2725 B/op 47 allocs/op +BenchmarkDo-16 2 601685067 ns/op 2704 B/op 48 allocs/op +BenchmarkDo-16 3 336872997 ns/op 2693 B/op 47 allocs/op +BenchmarkDo-16 3 384347911 ns/op 2725 B/op 47 allocs/op +BenchmarkDo-16 3 480906307 ns/op 2693 B/op 47 allocs/op +BenchmarkDo-16 3 455362447 ns/op 2693 B/op 47 allocs/op +BenchmarkDo-16 3 443170384 ns/op 2693 B/op 47 allocs/op +BenchmarkDoNoErrors-16 6872852 159.4 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7650360 161.3 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7235683 159.3 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7465636 160.2 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7549692 160.7 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7510610 159.8 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7438124 160.3 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7416504 160.2 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7356183 160.4 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7393480 160.1 ns/op 208 B/op 4 allocs/op +PASS +ok github.com/avast/retry-go/v4 35.971s diff --git a/vendor/github.com/avast/retry-go/v4/generic.txt b/vendor/github.com/avast/retry-go/v4/generic.txt new file mode 100644 index 00000000000..116a09645fc --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/generic.txt @@ -0,0 +1,46 @@ +goos: darwin +goarch: amd64 +pkg: github.com/avast/retry-go/v4 +cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz +BenchmarkDo-16 3 406306609 ns/op 2701 B/op 48 allocs/op +BenchmarkDo-16 3 419470846 ns/op 2693 B/op 47 allocs/op +BenchmarkDo-16 2 567716303 ns/op 2696 B/op 47 allocs/op +BenchmarkDo-16 2 562713288 ns/op 2696 B/op 47 allocs/op +BenchmarkDo-16 3 418301987 ns/op 2693 B/op 47 allocs/op +BenchmarkDo-16 2 541207332 ns/op 2696 B/op 47 allocs/op +BenchmarkDo-16 2 526211617 ns/op 2696 B/op 47 allocs/op +BenchmarkDo-16 2 517419526 ns/op 2696 B/op 47 allocs/op +BenchmarkDo-16 3 478391497 ns/op 2693 B/op 47 allocs/op +BenchmarkDo-16 3 452548175 ns/op 2725 B/op 47 allocs/op +BenchmarkDoWithData-16 3 463040866 ns/op 2693 B/op 47 allocs/op +BenchmarkDoWithData-16 3 496158943 ns/op 2693 B/op 47 allocs/op +BenchmarkDoWithData-16 3 488367012 ns/op 2725 B/op 47 allocs/op +BenchmarkDoWithData-16 3 454618897 ns/op 2693 B/op 47 allocs/op +BenchmarkDoWithData-16 3 435430056 ns/op 2693 B/op 47 allocs/op +BenchmarkDoWithData-16 2 552289967 ns/op 2744 B/op 48 allocs/op +BenchmarkDoWithData-16 3 569748815 ns/op 2693 B/op 47 allocs/op +BenchmarkDoWithData-16 3 416597207 ns/op 2725 B/op 47 allocs/op +BenchmarkDoWithData-16 3 358455415 ns/op 2725 B/op 47 allocs/op +BenchmarkDoWithData-16 3 455297803 ns/op 2725 B/op 47 allocs/op +BenchmarkDoNoErrors-16 7035135 161.9 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7389806 161.3 
ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7394016 161.5 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7380039 162.2 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7424865 162.2 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7111860 160.5 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7285305 162.6 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7410627 160.7 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7340961 161.6 ns/op 208 B/op 4 allocs/op +BenchmarkDoNoErrors-16 7295727 164.1 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 7357304 159.9 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 6649852 166.9 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 6938404 176.3 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 7181965 160.4 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 7311484 166.2 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 6939157 169.7 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 6648344 179.0 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 6794847 177.0 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 6782588 171.4 ns/op 208 B/op 4 allocs/op +BenchmarkDoWithDataNoErrors-16 7279119 166.9 ns/op 208 B/op 4 allocs/op +PASS +ok github.com/avast/retry-go/v4 73.128s diff --git a/vendor/github.com/avast/retry-go/v4/options.go b/vendor/github.com/avast/retry-go/v4/options.go new file mode 100644 index 00000000000..5577ee7cbd0 --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/options.go @@ -0,0 +1,280 @@ +package retry + +import ( + "context" + "math" + "math/rand" + "time" +) + +// Function signature of retry if function +type RetryIfFunc func(error) bool + +// Function signature of OnRetry function +type OnRetryFunc func(attempt uint, err error) + +// DelayTypeFunc is called to return the next delay to wait after the retriable function fails on `err` after `n` attempts. +type DelayTypeFunc func(n uint, err error, config *Config) time.Duration + +// Timer represents the timer used to track time for a retry. +type Timer interface { + After(time.Duration) <-chan time.Time +} + +type Config struct { + attempts uint + attemptsForError map[error]uint + delay time.Duration + maxDelay time.Duration + maxJitter time.Duration + onRetry OnRetryFunc + retryIf RetryIfFunc + delayType DelayTypeFunc + lastErrorOnly bool + context context.Context + timer Timer + wrapContextErrorWithLastError bool + + maxBackOffN uint +} + +// Option represents an option for retry. +type Option func(*Config) + +func emptyOption(c *Config) {} + +// return the direct last error that came from the retried function +// default is false (return wrapped errors with everything) +func LastErrorOnly(lastErrorOnly bool) Option { + return func(c *Config) { + c.lastErrorOnly = lastErrorOnly + } +} + +// Attempts set count of retry. Setting to 0 will retry until the retried function succeeds. +// default is 10 +func Attempts(attempts uint) Option { + return func(c *Config) { + c.attempts = attempts + } +} + +// UntilSucceeded will retry until the retried function succeeds. Equivalent to setting Attempts(0). +func UntilSucceeded() Option { + return func(c *Config) { + c.attempts = 0 + } +} + +// AttemptsForError sets count of retry in case execution results in given `err` +// Retries for the given `err` are also counted against total retries. +// The retry will stop if any of given retries is exhausted. 
+// +// added in 4.3.0 +func AttemptsForError(attempts uint, err error) Option { + return func(c *Config) { + c.attemptsForError[err] = attempts + } +} + +// Delay set delay between retry +// default is 100ms +func Delay(delay time.Duration) Option { + return func(c *Config) { + c.delay = delay + } +} + +// MaxDelay set maximum delay between retry +// does not apply by default +func MaxDelay(maxDelay time.Duration) Option { + return func(c *Config) { + c.maxDelay = maxDelay + } +} + +// MaxJitter sets the maximum random Jitter between retries for RandomDelay +func MaxJitter(maxJitter time.Duration) Option { + return func(c *Config) { + c.maxJitter = maxJitter + } +} + +// DelayType set type of the delay between retries +// default is BackOff +func DelayType(delayType DelayTypeFunc) Option { + if delayType == nil { + return emptyOption + } + return func(c *Config) { + c.delayType = delayType + } +} + +// BackOffDelay is a DelayType which increases delay between consecutive retries +func BackOffDelay(n uint, _ error, config *Config) time.Duration { + // 1 << 63 would overflow signed int64 (time.Duration), thus 62. + const max uint = 62 + + if config.maxBackOffN == 0 { + if config.delay <= 0 { + config.delay = 1 + } + + config.maxBackOffN = max - uint(math.Floor(math.Log2(float64(config.delay)))) + } + + if n > config.maxBackOffN { + n = config.maxBackOffN + } + + return config.delay << n +} + +// FixedDelay is a DelayType which keeps delay the same through all iterations +func FixedDelay(_ uint, _ error, config *Config) time.Duration { + return config.delay +} + +// RandomDelay is a DelayType which picks a random delay up to config.maxJitter +func RandomDelay(_ uint, _ error, config *Config) time.Duration { + return time.Duration(rand.Int63n(int64(config.maxJitter))) +} + +// CombineDelay is a DelayType the combines all of the specified delays into a new DelayTypeFunc +func CombineDelay(delays ...DelayTypeFunc) DelayTypeFunc { + const maxInt64 = uint64(math.MaxInt64) + + return func(n uint, err error, config *Config) time.Duration { + var total uint64 + for _, delay := range delays { + total += uint64(delay(n, err, config)) + if total > maxInt64 { + total = maxInt64 + } + } + + return time.Duration(total) + } +} + +// OnRetry function callback are called each retry +// +// log each retry example: +// +// retry.Do( +// func() error { +// return errors.New("some error") +// }, +// retry.OnRetry(func(n uint, err error) { +// log.Printf("#%d: %s\n", n, err) +// }), +// ) +func OnRetry(onRetry OnRetryFunc) Option { + if onRetry == nil { + return emptyOption + } + return func(c *Config) { + c.onRetry = onRetry + } +} + +// RetryIf controls whether a retry should be attempted after an error +// (assuming there are any retry attempts remaining) +// +// skip retry if special error example: +// +// retry.Do( +// func() error { +// return errors.New("special error") +// }, +// retry.RetryIf(func(err error) bool { +// if err.Error() == "special error" { +// return false +// } +// return true +// }) +// ) +// +// By default RetryIf stops execution if the error is wrapped using `retry.Unrecoverable`, +// so above example may also be shortened to: +// +// retry.Do( +// func() error { +// return retry.Unrecoverable(errors.New("special error")) +// } +// ) +func RetryIf(retryIf RetryIfFunc) Option { + if retryIf == nil { + return emptyOption + } + return func(c *Config) { + c.retryIf = retryIf + } +} + +// Context allow to set context of retry +// default are Background context +// +// example of immediately 
cancellation (maybe it isn't the best example, but it describes behavior enough; I hope) +// +// ctx, cancel := context.WithCancel(context.Background()) +// cancel() +// +// retry.Do( +// func() error { +// ... +// }, +// retry.Context(ctx), +// ) +func Context(ctx context.Context) Option { + return func(c *Config) { + c.context = ctx + } +} + +// WithTimer provides a way to swap out timer module implementations. +// This primarily is useful for mocking/testing, where you may not want to explicitly wait for a set duration +// for retries. +// +// example of augmenting time.After with a print statement +// +// type struct MyTimer {} +// +// func (t *MyTimer) After(d time.Duration) <- chan time.Time { +// fmt.Print("Timer called!") +// return time.After(d) +// } +// +// retry.Do( +// func() error { ... }, +// retry.WithTimer(&MyTimer{}) +// ) +func WithTimer(t Timer) Option { + return func(c *Config) { + c.timer = t + } +} + +// WrapContextErrorWithLastError allows the context error to be returned wrapped with the last error that the +// retried function returned. This is only applicable when Attempts is set to 0 to retry indefinitly and when +// using a context to cancel / timeout +// +// default is false +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// retry.Do( +// func() error { +// ... +// }, +// retry.Context(ctx), +// retry.Attempts(0), +// retry.WrapContextErrorWithLastError(true), +// ) +func WrapContextErrorWithLastError(wrapContextErrorWithLastError bool) Option { + return func(c *Config) { + c.wrapContextErrorWithLastError = wrapContextErrorWithLastError + } +} diff --git a/vendor/github.com/avast/retry-go/v4/retry.go b/vendor/github.com/avast/retry-go/v4/retry.go new file mode 100644 index 00000000000..1812a1b03ed --- /dev/null +++ b/vendor/github.com/avast/retry-go/v4/retry.go @@ -0,0 +1,347 @@ +/* +Simple library for retry mechanism + +Slightly inspired by [Try::Tiny::Retry](https://metacpan.org/pod/Try::Tiny::Retry) + +# SYNOPSIS + +HTTP GET with retry: + + url := "http://example.com" + var body []byte + + err := retry.Do( + func() error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + return nil + }, + ) + + if err != nil { + // handle error + } + + fmt.Println(string(body)) + +HTTP GET with retry with data: + + url := "http://example.com" + + body, err := retry.DoWithData( + func() ([]byte, error) { + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return body, nil + }, + ) + + if err != nil { + // handle error + } + + fmt.Println(string(body)) + +[More examples](https://github.com/avast/retry-go/tree/master/examples) + +# SEE ALSO + +* [giantswarm/retry-go](https://github.com/giantswarm/retry-go) - slightly complicated interface. + +* [sethgrid/pester](https://github.com/sethgrid/pester) - only http retry for http calls with retries and backoff + +* [cenkalti/backoff](https://github.com/cenkalti/backoff) - Go port of the exponential backoff algorithm from Google's HTTP Client Library for Java. Really complicated interface. 
+ +* [rafaeljesus/retry-go](https://github.com/rafaeljesus/retry-go) - looks good, slightly similar as this package, don't have 'simple' `Retry` method + +* [matryer/try](https://github.com/matryer/try) - very popular package, nonintuitive interface (for me) + +# BREAKING CHANGES + +* 4.0.0 + - infinity retry is possible by set `Attempts(0)` by PR [#49](https://github.com/avast/retry-go/pull/49) + +* 3.0.0 + - `DelayTypeFunc` accepts a new parameter `err` - this breaking change affects only your custom Delay Functions. This change allow [make delay functions based on error](examples/delay_based_on_error_test.go). + +* 1.0.2 -> 2.0.0 + - argument of `retry.Delay` is final delay (no multiplication by `retry.Units` anymore) + - function `retry.Units` are removed + - [more about this breaking change](https://github.com/avast/retry-go/issues/7) + +* 0.3.0 -> 1.0.0 + - `retry.Retry` function are changed to `retry.Do` function + - `retry.RetryCustom` (OnRetry) and `retry.RetryCustomWithOpts` functions are now implement via functions produces Options (aka `retry.OnRetry`) +*/ +package retry + +import ( + "context" + "errors" + "fmt" + "strings" + "time" +) + +// Function signature of retryable function +type RetryableFunc func() error + +// Function signature of retryable function with data +type RetryableFuncWithData[T any] func() (T, error) + +// Default timer is a wrapper around time.After +type timerImpl struct{} + +func (t *timerImpl) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func Do(retryableFunc RetryableFunc, opts ...Option) error { + retryableFuncWithData := func() (any, error) { + return nil, retryableFunc() + } + + _, err := DoWithData(retryableFuncWithData, opts...) + return err +} + +func DoWithData[T any](retryableFunc RetryableFuncWithData[T], opts ...Option) (T, error) { + var n uint + var emptyT T + + // default + config := newDefaultRetryConfig() + + // apply opts + for _, opt := range opts { + opt(config) + } + + if err := config.context.Err(); err != nil { + return emptyT, err + } + + // Setting attempts to 0 means we'll retry until we succeed + var lastErr error + if config.attempts == 0 { + for { + t, err := retryableFunc() + if err == nil { + return t, nil + } + + if !IsRecoverable(err) { + return emptyT, err + } + + if !config.retryIf(err) { + return emptyT, err + } + + lastErr = err + + config.onRetry(n, err) + n++ + select { + case <-config.timer.After(delay(config, n, err)): + case <-config.context.Done(): + if config.wrapContextErrorWithLastError { + return emptyT, Error{config.context.Err(), lastErr} + } + return emptyT, config.context.Err() + } + } + } + + errorLog := Error{} + + attemptsForError := make(map[error]uint, len(config.attemptsForError)) + for err, attempts := range config.attemptsForError { + attemptsForError[err] = attempts + } + + shouldRetry := true + for shouldRetry { + t, err := retryableFunc() + if err == nil { + return t, nil + } + + errorLog = append(errorLog, unpackUnrecoverable(err)) + + if !config.retryIf(err) { + break + } + + config.onRetry(n, err) + + for errToCheck, attempts := range attemptsForError { + if errors.Is(err, errToCheck) { + attempts-- + attemptsForError[errToCheck] = attempts + shouldRetry = shouldRetry && attempts > 0 + } + } + + // if this is last attempt - don't wait + if n == config.attempts-1 { + break + } + n++ + select { + case <-config.timer.After(delay(config, n, err)): + case <-config.context.Done(): + if config.lastErrorOnly { + return emptyT, config.context.Err() + } + + return emptyT, 
append(errorLog, config.context.Err()) + } + + shouldRetry = shouldRetry && n < config.attempts + } + + if config.lastErrorOnly { + return emptyT, errorLog.Unwrap() + } + return emptyT, errorLog +} + +func newDefaultRetryConfig() *Config { + return &Config{ + attempts: uint(10), + attemptsForError: make(map[error]uint), + delay: 100 * time.Millisecond, + maxJitter: 100 * time.Millisecond, + onRetry: func(n uint, err error) {}, + retryIf: IsRecoverable, + delayType: CombineDelay(BackOffDelay, RandomDelay), + lastErrorOnly: false, + context: context.Background(), + timer: &timerImpl{}, + } +} + +// Error type represents list of errors in retry +type Error []error + +// Error method return string representation of Error +// It is an implementation of error interface +func (e Error) Error() string { + logWithNumber := make([]string, len(e)) + for i, l := range e { + if l != nil { + logWithNumber[i] = fmt.Sprintf("#%d: %s", i+1, l.Error()) + } + } + + return fmt.Sprintf("All attempts fail:\n%s", strings.Join(logWithNumber, "\n")) +} + +func (e Error) Is(target error) bool { + for _, v := range e { + if errors.Is(v, target) { + return true + } + } + return false +} + +func (e Error) As(target interface{}) bool { + for _, v := range e { + if errors.As(v, target) { + return true + } + } + return false +} + +/* +Unwrap the last error for compatibility with `errors.Unwrap()`. +When you need to unwrap all errors, you should use `WrappedErrors()` instead. + + err := Do( + func() error { + return errors.New("original error") + }, + Attempts(1), + ) + + fmt.Println(errors.Unwrap(err)) # "original error" is printed + +Added in version 4.2.0. +*/ +func (e Error) Unwrap() error { + return e[len(e)-1] +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the `errwrap.Wrapper` interface +// in package [errwrap](https://github.com/hashicorp/errwrap) so that +// `retry.Error` can be used with that library. 
+func (e Error) WrappedErrors() []error { + return e +} + +type unrecoverableError struct { + error +} + +func (e unrecoverableError) Error() string { + if e.error == nil { + return "unrecoverable error" + } + return e.error.Error() +} + +func (e unrecoverableError) Unwrap() error { + return e.error +} + +// Unrecoverable wraps an error in `unrecoverableError` struct +func Unrecoverable(err error) error { + return unrecoverableError{err} +} + +// IsRecoverable checks if error is an instance of `unrecoverableError` +func IsRecoverable(err error) bool { + return !errors.Is(err, unrecoverableError{}) +} + +// Adds support for errors.Is usage on unrecoverableError +func (unrecoverableError) Is(err error) bool { + _, isUnrecoverable := err.(unrecoverableError) + return isUnrecoverable +} + +func unpackUnrecoverable(err error) error { + if unrecoverable, isUnrecoverable := err.(unrecoverableError); isUnrecoverable { + return unrecoverable.error + } + + return err +} + +func delay(config *Config, n uint, err error) time.Duration { + delayTime := config.delayType(n, err, config) + if config.maxDelay > 0 && delayTime > config.maxDelay { + delayTime = config.maxDelay + } + + return delayTime +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 04f5ad449c0..303a65189dd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -73,6 +73,9 @@ github.com/ansel1/merry # github.com/ansel1/merry/v2 v2.0.1 ## explicit; go 1.12 github.com/ansel1/merry/v2 +# github.com/avast/retry-go/v4 v4.6.1 +## explicit; go 1.18 +github.com/avast/retry-go/v4 # github.com/aws/aws-sdk-go v1.55.8 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws From dd8df907426c4bc27d5e999a1ae48d13f5df2c54 Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Wed, 3 Sep 2025 14:40:30 +0200 Subject: [PATCH 049/157] util: add modprobe helper for loading kernel modules There are several places where kernel modules are loaded, reduce the duplication of the code by introducing the new `kmod.Modprobe()` function. Signed-off-by: Niels de Vos --- internal/util/kmod/modprobe.go | 55 ++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 internal/util/kmod/modprobe.go diff --git a/internal/util/kmod/modprobe.go b/internal/util/kmod/modprobe.go new file mode 100644 index 00000000000..87a58b42bc1 --- /dev/null +++ b/internal/util/kmod/modprobe.go @@ -0,0 +1,55 @@ +/* +Copyright 2025 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kmod + +import ( + "context" + "fmt" + "os" + + "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/log" +) + +// Modprobe will check if the module is already available for use, and if that +// is not the case, it will try to load it. 
+func Modprobe(ctx context.Context, kmod string) error { + _, err := os.Stat("/sys/module/" + kmod) + if err == nil { + // module already loaded (or compiled into the kernel) + return nil + } + + if !os.IsNotExist(err) { + // something went really wrong + err = fmt.Errorf("failed to check availability of kernel module %q: %w", kmod, err) + log.WarningLog(ctx, err.Error()) + + return err + } + + // try to load the module + _, stderr, err := util.ExecCommand(ctx, "modprobe", kmod) + if err != nil { + err = fmt.Errorf("modprobe of kernel module %q failed (%w): %s", kmod, err, stderr) + log.WarningLog(ctx, err.Error()) + + return err + } + + return nil +} From 20360049bd56eb3e2db645d22fc0c0889601142b Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Wed, 3 Sep 2025 14:56:27 +0200 Subject: [PATCH 050/157] util: use kmod.Modprobe for loading kernel modules Signed-off-by: Niels de Vos --- internal/cephfs/mounter/kernel.go | 3 ++- internal/rbd/rbd_attach.go | 18 ++++++------------ internal/rbd/rbd_util.go | 6 ++---- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/internal/cephfs/mounter/kernel.go b/internal/cephfs/mounter/kernel.go index 260fdc57078..241c37e10ff 100644 --- a/internal/cephfs/mounter/kernel.go +++ b/internal/cephfs/mounter/kernel.go @@ -24,6 +24,7 @@ import ( "github.com/ceph/ceph-csi/internal/cephfs/store" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/kmod" ) const ( @@ -80,7 +81,7 @@ func (m *kernelMounter) mountKernel( volOptions *store.VolumeOptions, ) error { if m.needsModprobe { - if err := execCommandErr(ctx, "modprobe", kernelModule); err != nil { + if err := kmod.Modprobe(ctx, kernelModule); err != nil { return err } diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go index c7001987238..d77baaa1603 100644 --- a/internal/rbd/rbd_attach.go +++ b/internal/rbd/rbd_attach.go @@ -20,13 +20,13 @@ import ( "context" "encoding/json" "fmt" - "os" "slices" "strings" "github.com/ceph/ceph-csi/pkg/util/kernel" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/kmod" "github.com/ceph/ceph-csi/internal/util/log" "github.com/avast/retry-go/v4" @@ -239,19 +239,13 @@ func waitForPath( // SetRbdNbdToolFeatures sets features available with rbd-nbd, and NBD module // loaded status. func SetRbdNbdToolFeatures() { - var stderr string - // check if the module is loaded or compiled in - _, err := os.Stat("/sys/module/" + moduleNbd) - if os.IsNotExist(err) { - // try to load the module - _, stderr, err = util.ExecCommand(context.TODO(), "modprobe", moduleNbd) - if err != nil { - hasNBD = false - log.WarningLogMsg("nbd modprobe failed (%v): %q", err, stderr) + err := kmod.Modprobe(context.TODO(), moduleNbd) + if err != nil { + hasNBD = false - return - } + return } + log.DefaultLog("nbd module loaded") // fetch the current running kernel info diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index dbfa1b3d6d6..f57be02ffaf 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -35,6 +35,7 @@ import ( rbderrors "github.com/ceph/ceph-csi/internal/rbd/errors" "github.com/ceph/ceph-csi/internal/rbd/types" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/kmod" "github.com/ceph/ceph-csi/internal/util/log" "github.com/ceph/go-ceph/rados" @@ -322,7 +323,6 @@ func prepareKrbdFeatureAttrs() (uint64, error) { // GetKrbdSupportedFeatures load the module if needed and return supported // features attribute as a string. 
func GetKrbdSupportedFeatures() (string, error) { - var stderr string // check if the module is loaded or compiled in _, err := os.Stat(krbdSupportedFeaturesFile) if err != nil { @@ -332,10 +332,8 @@ func GetKrbdSupportedFeatures() (string, error) { return "", err } // try to load the module - _, stderr, err = util.ExecCommand(context.TODO(), "modprobe", rbdDefaultMounter) + err = kmod.Modprobe(context.TODO(), rbdDefaultMounter) if err != nil { - log.ErrorLogMsg("modprobe failed (%v): %q", err, stderr) - return "", err } } From 9910bd0fb42f6168d1bd8ae38ac20badadd659e8 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 4 Sep 2025 10:21:10 +0530 Subject: [PATCH 051/157] util: add req ID for Controller[Un]PublishVolume calls Signed-off-by: Praveen M --- internal/csi-common/utils.go | 6 ++++++ internal/csi-common/utils_test.go | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/internal/csi-common/utils.go b/internal/csi-common/utils.go index 480cec39610..40e3e446d94 100644 --- a/internal/csi-common/utils.go +++ b/internal/csi-common/utils.go @@ -190,6 +190,7 @@ func GetIDFromReplication(req interface{}) string { } } +//nolint:gocyclo,cyclop // TODO: reduce complexity func getReqID(req interface{}) string { // if req is nil empty string will be returned reqID := "" @@ -208,6 +209,11 @@ func getReqID(req interface{}) string { case *csi.ControllerExpandVolumeRequest: reqID = r.GetVolumeId() + case *csi.ControllerPublishVolumeRequest: + reqID = r.GetVolumeId() + case *csi.ControllerUnpublishVolumeRequest: + reqID = r.GetVolumeId() + case *csi.NodeStageVolumeRequest: reqID = r.GetVolumeId() case *csi.NodeUnstageVolumeRequest: diff --git a/internal/csi-common/utils_test.go b/internal/csi-common/utils_test.go index 2df39da7285..8b1358e9bdb 100644 --- a/internal/csi-common/utils_test.go +++ b/internal/csi-common/utils_test.go @@ -49,6 +49,12 @@ func TestGetReqID(t *testing.T) { &csi.ControllerExpandVolumeRequest{ VolumeId: fakeID, }, + &csi.ControllerPublishVolumeRequest{ + VolumeId: fakeID, + }, + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: fakeID, + }, &csi.NodeStageVolumeRequest{ VolumeId: fakeID, }, From acc1c8bdad527406c1e8861103f2a58f8c881c6c Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 4 Sep 2025 10:22:16 +0530 Subject: [PATCH 052/157] rbd: expose GetUserIDMappingKey and GetClientAddressKey methods Signed-off-by: Praveen M --- internal/rbd/controllerserver.go | 6 +++--- internal/rbd/nodeserver.go | 4 ++-- internal/rbd/rbd_util.go | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index 00bf5cfa824..5a2a06ac795 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -1835,7 +1835,7 @@ func (cs *ControllerServer) removeUserIdMapping( return errors.New("nodeId cannot be empty") } - metadataKey := getUserIDMappingKey(rv.VolID, nodeId) + metadataKey := GetUserIDMappingKey(rv.VolID, nodeId) err := rv.RemoveMetadata(metadataKey) if err != nil && !errors.Is(err, librbd.ErrNotExist) { return fmt.Errorf("failed to remove user ID mapping for node %s on image %s: %w", nodeId, rv, err) @@ -1875,7 +1875,7 @@ func (cs *ControllerServer) fenceNode( return errors.New("nodeId cannot be empty") } - metadataKey := getClientAddressKey(rv.VolID, nodeId) + metadataKey := GetClientAddressKey(rv.VolID, nodeId) isOutOfService, err := k8s.IsNodeOutOfService(nodeId) if err != nil { return fmt.Errorf("failed to check if node %s is out of service: %w", nodeId, err) @@ -1947,7 
+1947,7 @@ func (cs *ControllerServer) unfenceNode( return nil } - metadataKey := getClientAddressKey(rv.VolID, nodeId) + metadataKey := GetClientAddressKey(rv.VolID, nodeId) clientAddress, err := rv.GetMetadata(metadataKey) if err != nil { if errors.Is(err, librbd.ErrNotFound) { diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index 92a690bec17..e715714b293 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -452,7 +452,7 @@ func (ns *NodeServer) setUserIdMapping( } nodeId := ns.Driver.GetNodeID() - metadataKey := getUserIDMappingKey(rv.VolID, nodeId) + metadataKey := GetUserIDMappingKey(rv.VolID, nodeId) err := rv.SetMetadata(metadataKey, cr.ID) if err != nil { return fmt.Errorf("failed to set client address for %s: %w", rv, err) @@ -479,7 +479,7 @@ func (ns *NodeServer) setClientAddress( } nodeId := ns.Driver.GetNodeID() - metadataKey := getClientAddressKey(rv.VolID, nodeId) + metadataKey := GetClientAddressKey(rv.VolID, nodeId) monitors, _ /* clusterID*/, err := util.GetMonsAndClusterID(ctx, rv.ClusterID, false) if err != nil { return fmt.Errorf("failed to get monitors for cluster %s: %w", rv.ClusterID, err) diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index f57be02ffaf..05604eb7005 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -286,13 +286,13 @@ var ( } ) -// getClientAddressKey returns the key for storing client address metadata for a specific node. -func getClientAddressKey(volumeId, nodeId string) string { +// GetClientAddressKey returns the key for storing client address metadata for a specific node. +func GetClientAddressKey(volumeId, nodeId string) string { return fmt.Sprintf("%s/%s/%s", clientAddressKey, volumeId, nodeId) } -// getUserIDMappingKey returns the key for storing user ID mapping metadata for a specific node. -func getUserIDMappingKey(volumeID, nodeID string) string { +// GetUserIDMappingKey returns the key for storing user ID mapping metadata for a specific node. +func GetUserIDMappingKey(volumeID, nodeID string) string { return fmt.Sprintf("%s/%s/%s", userIdMappingKey, volumeID, nodeID) } From fed23b8a5435b926e602b987d0b03902520f7c94 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 4 Sep 2025 13:55:15 +0530 Subject: [PATCH 053/157] rbd: ensure volume is unpublished before demotion Problem: Pod deletion followed by immediate volume demotion can create a race where the volume becomes read-only (secondary) before ControllerUnpublishVolume can remove client metadata, causing cleanup failures. Solution: Validate that client metadata has been removed before allowing volume demotion, ensuring proper unpublish first. Signed-off-by: Praveen M --- internal/csi-addons/rbd/replication.go | 32 ++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/internal/csi-addons/rbd/replication.go b/internal/csi-addons/rbd/replication.go index 8b180362d82..6e2d13dfec6 100644 --- a/internal/csi-addons/rbd/replication.go +++ b/internal/csi-addons/rbd/replication.go @@ -548,6 +548,12 @@ func (rs *ReplicationServer) DemoteVolume(ctx context.Context, // demote image to secondary if info.IsPrimary() { + // If volume is demoted first, ControllerUnpublishVolume will fail to remove metadata + // since the image will be Read-only. + // Ensure volume is unpublished before demotion. 
+ if err := checkVolumeUnpublished(ctx, rs.Driver.GetNodeID(), rbdVol); err != nil { + return nil, status.Errorf(codes.FailedPrecondition, "volume cannot be demoted: %v", err) + } // store the image creation time for resync _, err = rbdVol.GetMetadata(imageCreationTimeKey) if err != nil && errors.Is(err, librbd.ErrNotFound) { @@ -993,3 +999,29 @@ func getCurrentReplicationStatus(ctx context.Context, mirrorGlobalStatus types.G return resp, desc } + +// checkVolumeUnpublished verifies that the volume has been properly unpublished +// by checking that clientaddress and userId mapping metadata is not present. +func checkVolumeUnpublished(ctx context.Context, nodeId string, rv types.Volume) error { + volumeId, err := rv.GetID(ctx) + if err != nil { + return err + } + + clientMetadataKeys := []string{ + corerbd.GetClientAddressKey(volumeId, nodeId), + corerbd.GetUserIDMappingKey(volumeId, nodeId), + } + + for _, key := range clientMetadataKeys { + _, err := rv.GetMetadata(key) + if err != nil && !errors.Is(err, librbd.ErrNotFound) { + return err + } + if !errors.Is(err, librbd.ErrNotFound) { + return fmt.Errorf("metadata %q is still present on the image %q", key, rv) + } + } + + return nil +} From 7bfad073d751841df38115363fb36a6c3ae3eb70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 21:03:57 +0000 Subject: [PATCH 054/157] rebase: bump actions/dependency-review-action from 4.7.2 to 4.7.3 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.2 to 4.7.3. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/bc41886e18ea39df68b1b1245f4184881938e050...595b5aeba73380359d98a5e087f648dbb0edce1b) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.7.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yaml b/.github/workflows/dependency-review.yaml index 0b23ac66377..cd7ad5cc125 100644 --- a/.github/workflows/dependency-review.yaml +++ b/.github/workflows/dependency-review.yaml @@ -25,6 +25,6 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: 'Dependency Review' # yamllint disable-line rule:line-length - uses: actions/dependency-review-action@bc41886e18ea39df68b1b1245f4184881938e050 # v4.7.2 + uses: actions/dependency-review-action@595b5aeba73380359d98a5e087f648dbb0edce1b # v4.7.3 with: allow-ghsas: GHSA-f4w6-3rh6-6q4q From cd305e5bcc201a214345303dcabc8a62636ed764 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 20:59:02 +0000 Subject: [PATCH 055/157] rebase: bump actions/github-script from 7.0.1 to 8.0.0 Bumps [actions/github-script](https://github.com/actions/github-script) from 7.0.1 to 8.0.0. - [Release notes](https://github.com/actions/github-script/releases) - [Commits](https://github.com/actions/github-script/compare/60a0d83039c74a4aee543508d2ffcb1c3799cdea...ed597411d8f924073f98dfc5c65a23a2325f34cd) --- updated-dependencies: - dependency-name: actions/github-script dependency-version: 8.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/pull-request-commentor.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull-request-commentor.yaml b/.github/workflows/pull-request-commentor.yaml index 3654298c8e5..c89beb4c45f 100644 --- a/.github/workflows/pull-request-commentor.yaml +++ b/.github/workflows/pull-request-commentor.yaml @@ -111,7 +111,7 @@ jobs: steps: - name: remove ok-to-test-label after commenting # yamllint disable-line rule:line-length - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: github-token: ${{ secrets.CEPH_CSI_BOT_TOKEN }} script: | From 30488aa7e62c0c489fe1dd83bb82a6878ba33f6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 20:58:56 +0000 Subject: [PATCH 056/157] rebase: bump actions/stale from 9.1.0 to 10.0.0 Bumps [actions/stale](https://github.com/actions/stale) from 9.1.0 to 10.0.0. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/5bef64f19d7facfb25b37b414482c7164d639639...3a9db7e6a41a89f618792c92c0e97cc736e1b13f) --- updated-dependencies: - dependency-name: actions/stale dependency-version: 10.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/stale.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 90eb489d967..8e205dfaaa7 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -19,7 +19,7 @@ jobs: if: github.repository == 'ceph/ceph-csi' steps: # yamllint disable-line rule:line-length - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} days-before-issue-stale: 30 From 51ab516ee60f8ddddcfd3c034e30ab0a05b4315a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 20:45:24 +0000 Subject: [PATCH 057/157] rebase: bump github.com/onsi/ginkgo/v2 Bumps the github-dependencies group in /e2e with 1 update: [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo). Updates `github.com/onsi/ginkgo/v2` from 2.25.2 to 2.25.3 - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.25.2...v2.25.3) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-version: 2.25.3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: github-dependencies ... 
Signed-off-by: dependabot[bot] --- e2e/go.mod | 4 ++-- e2e/go.sum | 12 ++++++------ e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 6 ++++++ .../onsi/ginkgo/v2/reporters/default_reporter.go | 12 ++++++------ .../github.com/onsi/ginkgo/v2/types/version.go | 2 +- .../protobuf/internal/genid/api_gen.go | 6 ++++++ .../protobuf/internal/version/version.go | 2 +- .../protobuf/types/descriptorpb/descriptor.pb.go | 10 +++++++--- e2e/vendor/modules.txt | 6 +++--- 9 files changed, 38 insertions(+), 22 deletions(-) diff --git a/e2e/go.mod b/e2e/go.mod index 89cf114fa2a..71bf1ff29d1 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -13,7 +13,7 @@ require ( github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/google/uuid v1.6.0 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 - github.com/onsi/ginkgo/v2 v2.25.2 + github.com/onsi/ginkgo/v2 v2.25.3 github.com/onsi/gomega v1.38.2 // when updating k8s.io modules, update the 'replace' section below too k8s.io/api v0.33.4 @@ -131,7 +131,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/grpc v1.75.0 // indirect - google.golang.org/protobuf v1.36.7 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/e2e/go.sum b/e2e/go.sum index 14aa8af840c..f19de3ca74f 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -138,8 +138,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= -github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= +github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/cgroups v0.0.1 h1:MXjMkkFpKv6kpuirUa4USFBas573sSAY082B4CiHEVA= @@ -188,8 +188,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= +github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark 
v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -280,8 +280,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 1c4d5329fac..0e5f23782f9 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,9 @@ +## 2.25.3 + +### Fixes + +- emit --github-output group only for progress report itself [f01aed1] + ## 2.25.2 ### Fixes diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 637232b227b..8c3714b8ae6 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -404,7 +404,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim case types.ReportEntry: r.emitReportEntry(indent, x) case types.ProgressReport: - r.emitProgressReport(indent, false, x) + r.emitProgressReport(indent, false, false, x) case types.SpecEvent: if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { r.emitSpecEvent(indent, x, isVeryVerbose) @@ -458,7 +458,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur if !failure.ProgressReport.IsZero() { r.emitBlock("\n") - r.emitProgressReport(indent, false, failure.ProgressReport) + r.emitProgressReport(indent, false, false, failure.ProgressReport) } if failure.AdditionalFailure != nil && includeAdditionalFailure { @@ -474,11 +474,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) } shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) - r.emitProgressReport(1, shouldEmitGW, report) + r.emitProgressReport(1, shouldEmitGW, true, report) r.emitDelimiter(1) } -func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) { if report.Message != "" { r.emitBlock(r.fi(indent, report.Message+"\n")) indent += 1 @@ -514,7 +514,7 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } - if r.conf.GithubOutput { + if 
r.conf.GithubOutput && emitGroup { r.emitBlock(r.fi(indent, "::group::Progress Report")) } @@ -565,7 +565,7 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emitGoroutines(indent, otherGoroutines...) } - if r.conf.GithubOutput { + if r.conf.GithubOutput && emitGroup { r.emitBlock(r.fi(indent, "::endgroup::")) } } diff --git a/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go b/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go index 49f4a94a15f..6aca6efa81b 100644 --- a/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/e2e/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.25.2" +const VERSION = "2.25.3" diff --git a/e2e/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/e2e/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f9185013..3ceb6fa7f5e 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. @@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. 
diff --git a/e2e/vendor/google.golang.org/protobuf/internal/version/version.go b/e2e/vendor/google.golang.org/protobuf/internal/version/version.go index a53364c5992..697d1c14f3c 100644 --- a/e2e/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/e2e/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 7 + Patch = 8 PreRelease = "" ) diff --git a/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 6843b0beafa..4eacb523c33 100644 --- a/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/e2e/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -2873,7 +2873,10 @@ type FieldOptions struct { // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // DEPRECATED. DO NOT USE! // For Google-internal migration only. Do not use. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. @@ -2977,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool { return Default_FieldOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetWeak() bool { if x != nil && x.Weak != nil { return *x.Weak @@ -4843,7 +4847,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + - "\"\x9d\r\n" + + "\"\xa1\r\n" + "\fFieldOptions\x12A\n" + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + @@ -4852,9 +4856,9 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + "\n" + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + - "deprecated\x12\x19\n" + + "deprecated\x12\x1d\n" + "\x04weak\x18\n" + - " \x01(\b:\x05falseR\x04weak\x12(\n" + + " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + diff --git a/e2e/vendor/modules.txt b/e2e/vendor/modules.txt index 7cb1b9609ae..f92d4b9e156 100644 --- a/e2e/vendor/modules.txt +++ b/e2e/vendor/modules.txt @@ -269,7 +269,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.25.2 +# github.com/onsi/ginkgo/v2 v2.25.3 ## explicit; go 1.23.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -601,8 +601,8 @@ 
google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.7 -## explicit; go 1.22 +# google.golang.org/protobuf v1.36.8 +## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext From 3229a51f6e08763e218bf9968313e160bce0e8ad Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Wed, 10 Sep 2025 09:55:49 +0200 Subject: [PATCH 058/157] ci: use ceph-csi-bot for Mergify actions Mergify complains that it can not queue PRs anymore: > The PR cannot be merged because of GitHub restrictions. You must set > up a bot_account to automatically queue and merge PRs created by a > GitHub App, or modify the GitHub Actions workflows. The `bot_account` used to be a non-free option for Mergify, but that seems to have changed. Signed-off-by: Niels de Vos --- .mergify.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 3cb90ed65ea..d939ac9274d 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,4 +1,13 @@ --- +defaults: + actions: + update: + bot_account: ceph-csi-bot + rebase: + bot_account: ceph-csi-bot + merge: + merge_bot_account: ceph-csi-bot + queue_rules: - name: default merge_conditions: From c7b3383158a19f8b160020b7b566723d2d5eb08c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 21:04:20 +0000 Subject: [PATCH 059/157] rebase: bump the github-dependencies group with 3 updates Bumps the github-dependencies group with 3 updates: [github.com/aws/aws-sdk-go-v2/service/sts](https://github.com/aws/aws-sdk-go-v2), [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) and [github.com/stretchr/testify](https://github.com/stretchr/testify). Updates `github.com/aws/aws-sdk-go-v2/service/sts` from 1.38.0 to 1.38.3 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.38.0...v1.38.3) Updates `github.com/prometheus/client_golang` from 1.23.0 to 1.23.2 - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.23.0...v1.23.2) Updates `github.com/stretchr/testify` from 1.11.0 to 1.11.1 - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.11.0...v1.11.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/sts dependency-version: 1.38.3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: github-dependencies - dependency-name: github.com/prometheus/client_golang dependency-version: 1.23.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: github-dependencies - dependency-name: github.com/stretchr/testify dependency-version: 1.11.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: github-dependencies ... 
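For context on the prometheus bumps: the vendored diffs below migrate name validation from the deprecated package-level helpers (model.IsValidMetricName, model.IsValidLegacyMetricName, LabelName.IsValid) to methods on model.ValidationScheme. The following is a minimal, illustrative sketch of the new calls, not part of this patch; it assumes prometheus/common v0.66+, and the sample metric and label names are made up for demonstration:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    )

    func main() {
    	// LegacyValidation enforces the classic [a-zA-Z_:][a-zA-Z0-9_:]* metric
    	// name rules; UTF8Validation only requires non-empty, valid UTF-8.
    	fmt.Println(model.LegacyValidation.IsValidMetricName("http.requests.total")) // false (dots)
    	fmt.Println(model.UTF8Validation.IsValidMetricName("http.requests.total"))   // true

    	// Label names are stricter under the legacy scheme: no colons allowed.
    	fmt.Println(model.LegacyValidation.IsValidLabelName("pod:name")) // false
    	fmt.Println(model.UTF8Validation.IsValidLabelName("pod:name"))   // true
    }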
Signed-off-by: dependabot[bot] --- go.mod | 21 +- go.sum | 42 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/middleware/user_agent.go | 2 + .../internal/configsources/CHANGELOG.md | 13 + .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.go | 9 +- .../endpoints/awsrulesfn/partitions.json | 5 +- .../internal/endpoints/v2/CHANGELOG.md | 13 + .../endpoints/v2/go_module_metadata.go | 2 +- .../internal/accept-encoding/CHANGELOG.md | 4 + .../accept-encoding/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 13 + .../presigned-url/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 13 + .../service/sts/go_module_metadata.go | 2 +- .../sts/internal/endpoints/endpoints.go | 3 + vendor/github.com/aws/smithy-go/CHANGELOG.md | 12 +- vendor/github.com/aws/smithy-go/README.md | 13 +- .../aws/smithy-go/go_module_metadata.go | 2 +- .../client_golang/prometheus/desc.go | 3 +- .../prometheus/internal/go_runtime_metrics.go | 2 +- .../client_golang/prometheus/labels.go | 3 +- .../prometheus/common/expfmt/decode.go | 39 +- .../prometheus/common/expfmt/encode.go | 10 +- .../prometheus/common/expfmt/expfmt.go | 12 +- .../prometheus/common/expfmt/fuzz.go | 9 +- .../common/expfmt/openmetrics_create.go | 11 +- .../prometheus/common/expfmt/text_create.go | 8 +- .../prometheus/common/expfmt/text_parse.go | 44 +- .../prometheus/common/model/labels.go | 35 +- .../prometheus/common/model/labelset.go | 10 +- .../prometheus/common/model/metric.go | 196 +- .../prometheus/common/model/time.go | 12 +- .../prometheus/common/model/value.go | 15 +- .../common/model/value_histogram.go | 10 +- .../prometheus/common/model/value_type.go | 4 +- vendor/go.yaml.in/yaml/v2/.travis.yml | 17 + vendor/go.yaml.in/yaml/v2/LICENSE | 201 ++ vendor/go.yaml.in/yaml/v2/LICENSE.libyaml | 31 + vendor/go.yaml.in/yaml/v2/NOTICE | 13 + vendor/go.yaml.in/yaml/v2/README.md | 131 + vendor/go.yaml.in/yaml/v2/apic.go | 744 +++++ vendor/go.yaml.in/yaml/v2/decode.go | 815 +++++ vendor/go.yaml.in/yaml/v2/emitterc.go | 1685 ++++++++++ vendor/go.yaml.in/yaml/v2/encode.go | 390 +++ vendor/go.yaml.in/yaml/v2/parserc.go | 1095 +++++++ vendor/go.yaml.in/yaml/v2/readerc.go | 412 +++ vendor/go.yaml.in/yaml/v2/resolve.go | 258 ++ vendor/go.yaml.in/yaml/v2/scannerc.go | 2711 +++++++++++++++++ vendor/go.yaml.in/yaml/v2/sorter.go | 113 + vendor/go.yaml.in/yaml/v2/writerc.go | 26 + vendor/go.yaml.in/yaml/v2/yaml.go | 478 +++ vendor/go.yaml.in/yaml/v2/yamlh.go | 739 +++++ vendor/go.yaml.in/yaml/v2/yamlprivateh.go | 173 ++ vendor/modules.txt | 23 +- 56 files changed, 10482 insertions(+), 173 deletions(-) create mode 100644 vendor/go.yaml.in/yaml/v2/.travis.yml create mode 100644 vendor/go.yaml.in/yaml/v2/LICENSE create mode 100644 vendor/go.yaml.in/yaml/v2/LICENSE.libyaml create mode 100644 vendor/go.yaml.in/yaml/v2/NOTICE create mode 100644 vendor/go.yaml.in/yaml/v2/README.md create mode 100644 vendor/go.yaml.in/yaml/v2/apic.go create mode 100644 vendor/go.yaml.in/yaml/v2/decode.go create mode 100644 vendor/go.yaml.in/yaml/v2/emitterc.go create mode 100644 vendor/go.yaml.in/yaml/v2/encode.go create mode 100644 vendor/go.yaml.in/yaml/v2/parserc.go create mode 100644 vendor/go.yaml.in/yaml/v2/readerc.go create mode 100644 vendor/go.yaml.in/yaml/v2/resolve.go create mode 100644 vendor/go.yaml.in/yaml/v2/scannerc.go create mode 100644 vendor/go.yaml.in/yaml/v2/sorter.go create mode 100644 vendor/go.yaml.in/yaml/v2/writerc.go create mode 100644 vendor/go.yaml.in/yaml/v2/yaml.go create mode 
100644 vendor/go.yaml.in/yaml/v2/yamlh.go create mode 100644 vendor/go.yaml.in/yaml/v2/yamlprivateh.go diff --git a/go.mod b/go.mod index 0a563dbb054..9864d18af3b 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/IBM/keyprotect-go-client v0.15.1 github.com/avast/retry-go/v4 v4.6.1 github.com/aws/aws-sdk-go v1.55.8 - github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.38.3 github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000 github.com/ceph/ceph-nvmeof/lib/go/nvmeof v0.0.0-20250812192600-037de4cfd423 github.com/ceph/go-ceph v0.35.0 @@ -27,8 +27,8 @@ require ( github.com/kubernetes-csi/csi-lib-utils v0.22.0 github.com/libopenstorage/secrets v0.0.0-20231011182615-5f4b25ceede1 github.com/pkg/xattr v0.4.12 - github.com/prometheus/client_golang v1.23.0 - github.com/stretchr/testify v1.11.0 + github.com/prometheus/client_golang v1.23.2 + github.com/stretchr/testify v1.11.1 go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.41.0 golang.org/x/net v0.43.0 @@ -70,12 +70,12 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect github.com/ansel1/merry v1.6.2 // indirect github.com/ansel1/merry/v2 v2.0.1 // indirect - github.com/aws/aws-sdk-go-v2 v1.38.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 // indirect - github.com/aws/smithy-go v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2 v1.39.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect + github.com/aws/smithy-go v1.23.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -129,7 +129,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -142,6 +142,7 @@ require ( go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect golang.org/x/term v0.34.0 // indirect diff --git a/go.sum b/go.sum index 839cbbde020..b7c05ec1670 100644 --- a/go.sum +++ b/go.sum @@ -108,20 +108,20 @@ github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBea github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.38.1 h1:j7sc33amE74Rz0M/PoCpsZQ6OunLqys/m5antM0J+Z8= -github.com/aws/aws-sdk-go-v2 v1.38.1/go.mod 
h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 h1:IdCLsiiIj5YJ3AFevsewURCPV+YWUlOW8JiPhoAy8vg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4/go.mod h1:l4bdfCD7XyyZA9BolKBo1eLqgaJxl0/x91PL4Yqe0ao= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 h1:j7vjtr1YIssWQOMeOWRbh3z8g2oY/xPjnZH2gLY4sGw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4/go.mod h1:yDmJgqOiH4EA8Hndnv4KwAo8jCGTSnM5ASG1nBI+toA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 h1:ueB2Te0NacDMnaC+68za9jLwkjzxGWm0KB5HTUHjLTI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4/go.mod h1:nLEfLnVMmLvyIG58/6gsSA03F1voKGaCfHV7+lR8S7s= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 h1:iV1Ko4Em/lkJIsoKyGfc0nQySi+v0Udxr6Igq+y9JZc= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.0/go.mod h1:bEPcjW7IbolPfK67G1nilqWyoxYMSPrDiIQ3RdIdKgo= -github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= -github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4= +github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 h1:UCxq0X9O3xrlENdKf1r9eRJoKz/b0AfGkpp3a7FPlhg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7/go.mod h1:rHRoJUNUASj5Z/0eqI4w32vKvC7atoWR0jC+IkmVH8k= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 h1:Y6DTZUn7ZUC4th9FMBbo8LVE+1fyq3ofw+tRwkUd3PY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7/go.mod h1:x3XE6vMnU9QvHN/Wrx2s44kwzV2o2g5x/siw4ZUJ9g8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.3 h1:yEiZ0ztgji2GsCb/6uQSITXcGdtmWMfLRys0jJFiUkc= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.3/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -595,13 +595,13 @@ github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467/go.mod h1:Q8YyrNDvPp github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= 
-github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -643,8 +643,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= -github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -690,6 +690,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 89449f67b26..86f7edb16a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.1" +const goModuleVersion = "1.39.0" diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index 6ee3391be27..3314230fd8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -135,6 +135,8 @@ const ( UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk) UserAgentFeatureCredentialsHTTP = "z" UserAgentFeatureCredentialsIMDS = "0" + + UserAgentFeatureBearerServiceEnvVars = "3" ) var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 48b7efd9d6c..553faf5037d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.4.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.4.4 (2025-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index e304ef67cf6..d0c4a4e33e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.4.4" +const goModuleVersion = "1.4.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 83e5ac62bf7..d4e6611f743 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -112,6 +112,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-6": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "ap-southeast-7": { Name: nil, DnsSuffix: nil, @@ -304,7 +311,7 @@ var partitions = []Partition{ DnsSuffix: "amazonaws.eu", DualStackDnsSuffix: "api.amazonwebservices.eu", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "eusc-de-east-1", }, Regions: map[string]RegionOverrides{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index 299cb220419..c6582c9c631 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -50,6 +50,9 @@ "ap-southeast-5" : { "description" : "Asia Pacific (Malaysia)" }, + "ap-southeast-6" : { + "description" : "Asia Pacific (New Zealand)" + }, "ap-southeast-7" : { "description" : "Asia Pacific (Thailand)" }, @@ -143,7 +146,7 @@ 
"dualStackDnsSuffix" : "api.amazonwebservices.eu", "implicitGlobalRegion" : "eusc-de-east-1", "name" : "aws-eusc", - "supportsDualStack" : false, + "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index ec7d54beffd..4f0701fd0d7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,16 @@ +# v2.7.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + # v2.7.4 (2025-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index e7b4e1cd18c..af8faba143b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.7.4" +const goModuleVersion = "2.7.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index 32c9d515746..607fc092204 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.1 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. + # v1.13.0 (2025-07-28) * **Feature**: Add support for HTTP interceptors. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index f4b9f0b9488..7a0b6aae29a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.0" +const goModuleVersion = "1.13.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 62da8050f3b..ba1764f378c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.13.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. 
+* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.4 (2025-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 4a0c6ae3c9c..e65ba0d51bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.4" +const goModuleVersion = "1.13.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index ca18a1e9f2c..da58746a354 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.38.3 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.2 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.1 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.38.0 (2025-08-21) * **Feature**: Remove incorrect endpoint tests diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 931a5d81e11..d06365a8de9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.0" +const goModuleVersion = "1.38.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index 3dfa51e5f4b..1dc87dd6bf1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -180,6 +180,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-5", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-6", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-7", }: endpoints.Endpoint{}, diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 1d60def6d1b..8b6ab295004 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,12 @@ +# Release (2025-08-27) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.23.0 + * **Feature**: Sort map keys in JSON Document types. + # Release (2025-07-24) ## General Highlights @@ -5,8 +14,7 @@ ## Module Highlights * `github.com/aws/smithy-go`: v1.22.5 - * **Bug Fix**: Fix HTTP metrics data race. - * **Bug Fix**: Replace usages of deprecated ioutil package. + * **Feature**: Add HTTP interceptors. 
# Release (2025-06-16) diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index c9ba5ea5e4b..77a74ae0c2c 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -8,15 +8,17 @@ The smithy-go runtime requires a minimum version of Go 1.22. **WARNING: All interfaces are subject to change.** -## Can I use the code generators? +## :no_entry_sign: DO NOT use the code generators in this repository + +**The code generators in this repository do not generate working clients at +this time.** In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), in order to generate transport mechanisms and serialization/deserialization code ("serde") accordingly. -The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, -therefore the useability of this project on its own is currently limited. +The code generator does not currently support any protocols out of the box. Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are tracking the movement of those out of the SDK into smithy-go in @@ -31,6 +33,7 @@ This repository implements the following Smithy build plugins: |----|------------|-------------| | `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | | `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | +| `go-shape-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go shape code generation (types only) for Smithy models. | **NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.** @@ -87,6 +90,10 @@ example created from `smithy init`: This plugin is a work-in-progress and is currently undocumented. +## `go-shape-codegen` + +This plugin is a work-in-progress and is currently undocumented. + ## License This project is licensed under the Apache-2.0 License. diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index cbbaabee9ef..945db0af309 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.5" +const goModuleVersion = "1.23.0" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113c04..2331b8b4f3b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. 
+	if !model.NameValidationScheme.IsValidMetricName(fqName) {
 		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
 		return d
 	}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
index f7f97ef9262..d273b6640e4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 	}
 
 	// Our current conversion moves to legacy naming, so use legacy validation.
-	valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name)
+	valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
 
 	switch d.Kind {
 	case metrics.KindUint64:
 	case metrics.KindFloat64:
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index c21911f292d..5fe8d3b4d29 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error {
 }
 
 func checkLabelName(l string) bool {
-	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+	return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
 }
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 1448439b7f7..7b762370e27 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format {
 	return FmtUnknown
 }
 
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812 func NewDecoder(r io.Reader, format Format) Decoder { + scheme := model.LegacyValidation + if format.ToEscapingScheme() == model.NoEscaping { + scheme = model.UTF8Validation + } switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} + return &protoDecoder{r: bufio.NewReader(r), s: scheme} + case TypeProtoText, TypeProtoCompact: + return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)} } - return &textDecoder{r: r} + return &textDecoder{r: r, s: scheme} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { r protodelim.Reader + s model.ValidationScheme } // Decode implements the Decoder interface. @@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f55d..73c24dfbc9c 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. @@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. 
func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560d7..c34c7de432b 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4e7..0290f6abc40 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1f8..8dbf6d04ed6 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b332..c4e9c1bbc3a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 4067978a178..8f2edde3244 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,6 +363,12 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. 
Don't add 'quantile' and 'le' @@ -353,13 +381,12 @@ func (p *TextParser) startLabelName() stateFn { labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index de83afe93e9..dfeb34be5f3 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,34 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. 
func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - // TODO: Apply De Morgan's law. Make sure there are tests for this. - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da334..9de47b2568e 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index a6b01755bd4..3feebf328ae 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,6 +14,7 @@ package model import ( + "encoding/json" "errors" "fmt" "regexp" @@ -23,6 +24,7 @@ import ( "unicode/utf8" dto "github.com/prometheus/client_model/go" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" ) @@ -62,16 +64,151 @@ var ( type ValidationScheme int const ( + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings. 
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -101,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -175,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -310,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -327,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -345,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
func lower(c byte) byte { return c | ('x' - 'X') } @@ -409,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index fed9e87b915..1730b0fdc12 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 8050637d822..a9995a37eee 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. 
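The hunks above fold name validation into the `ValidationScheme` type itself and deprecate the package-level `IsValidMetricName`/`IsValidLegacyMetricName` helpers, which now merely delegate through the global `NameValidationScheme`. A minimal sketch of what migrated call sites look like, assuming only the vendored `prometheus/common/model` API as patched here:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Deprecated style: validity depends on the process-global
	// model.NameValidationScheme.
	//   ok := model.IsValidMetricName(model.LabelValue("http_requests_total"))

	// Migrated style: the scheme is an explicit value, so callers can pass
	// it around instead of mutating a global.
	fmt.Println(model.UTF8Validation.IsValidMetricName("http.requests.total"))   // true: any non-empty UTF-8
	fmt.Println(model.LegacyValidation.IsValidMetricName("http.requests.total")) // false: '.' is not a legacy rune
	fmt.Println(model.LegacyValidation.IsValidLabelName("__name__"))             // true
}
```

Note that, per the diff, both methods panic on `UnsetValidation`, so a scheme loaded from configuration should be checked or defaulted before use.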
@@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e839..91ce5b7a45c 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee638..078910f46b7 100644 --- a/vendor/github.com/prometheus/common/model/value_type.go +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { +func (et ValueType) String() string { + switch et { case ValNone: return "" case ValScalar: diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 00000000000..7348c50c0c3 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/go.yaml.in/yaml/v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
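Returning to the `ValidationScheme` additions in metric.go further up: the new type also implements `fmt.Stringer`, JSON and YAML marshaling, and the `pflag.Value` interface, with two deliberate wrinkles visible in the diff: `UnsetValidation` marshals to the empty string, and `Set("")` leaves the current value untouched. A small sketch of that round-trip behavior, again assuming only the vendored package API:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// UnsetValidation serializes as "" so an absent config field
	// round-trips without forcing a scheme choice.
	b, _ := json.Marshal(model.UnsetValidation)
	fmt.Println(string(b)) // ""

	// Set("") is a no-op by design: an empty flag keeps the prior value.
	scheme := model.LegacyValidation
	_ = scheme.Set("")
	fmt.Println(scheme) // legacy

	// Recognized names flip the scheme; anything else returns an error.
	if err := scheme.Set("utf8"); err == nil {
		fmt.Println(scheme) // utf8
	}
	fmt.Println(scheme.Type()) // validationScheme
}
```

Treating the empty string as "no change" lets an omitted flag or config field fall back to whatever default the program wired in, rather than silently resetting the scheme.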
diff --git a/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml new file mode 100644 index 00000000000..8da58fbf6f8 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/go.yaml.in/yaml/v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE new file mode 100644 index 00000000000..866d74a7ad7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.yaml.in/yaml/v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md new file mode 100644 index 00000000000..c9388da4250 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *go.yaml.in/yaml/v2*. 
+ +To install it, run: + + go get go.yaml.in/yaml/v2 + +API documentation +----------------- + +See: + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/go.yaml.in/yaml/v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go new file mode 100644 index 00000000000..acf71402cf3 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go new file mode 100644 index 00000000000..129bc2a97d3 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/go.yaml.in/yaml/v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go new file mode 100644 index 00000000000..a1c2cc52627 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
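+	// Either way, the copy keeps the stored directive independent of
+	// the caller's event, whose byte slices may later be reused.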
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
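+//
+// This is also where output defaults are settled: best_indent is
+// clamped to the 2-9 range (defaulting to 2), best_width defaults to
+// 80 columns (a negative value means effectively unlimited), and the
+// line break defaults to '\n'.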
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
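+//
+// Short single-line keys are written in the simple "{key: value}"
+// form; keys that fail yaml_emitter_check_simple_key (multiline, or
+// longer than 128 bytes), and all keys in canonical mode, use the
+// explicit "? key" form instead.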
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
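+//
+// As in the flow case above, keys that cannot be expressed as simple
+// keys are emitted with an explicit "?" indicator.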
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
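+//
+// Flow style ("[a, b]") is chosen when already inside a flow
+// collection, in canonical mode, when the event requests it, or when
+// the sequence is empty; block style ("- a") is used otherwise.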
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
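+//
+// The requested style is only a hint: plain scalars degrade to single
+// quotes when the analyzed value disallows them (or when the value is
+// empty in a flow or simple-key context), single quotes degrade to
+// double quotes, and block styles (| and >) are rejected inside flow
+// context and simple keys.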
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
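+//
+// This single pass over the value records which styles remain legal:
+// leading or trailing whitespace rules out the plain styles, special
+// or non-printable characters force double quotes, and line breaks
+// rule out both plain forms.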
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
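+//
+// Note: this Go port always produces UTF-8 output, so the UTF-8 BOM
+// (0xEF 0xBB 0xBF) is what gets written even when another encoding
+// was requested on the emitter.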
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go new file mode 100644 index 00000000000..0ee738e11b6 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + 
"reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/go.yaml.in/yaml/v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go new file mode 100644 index 00000000000..81d05dfe573 --- /dev/null +++ 
b/vendor/go.yaml.in/yaml/v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
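+// Dispatches on parser.state to the corresponding yaml_parser_parse_* function below.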
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
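+					// The handle matched a %TAG directive; tag now holds the
+					// directive prefix followed by the node's tag suffix.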
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
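+// An empty scalar stands in for an omitted node (for example, the missing
+// value of a mapping entry), so the resulting event stream stays well-formed.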
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go new file mode 100644 index 00000000000..7c1f5fac3db --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
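+				// (Overlong encodings, e.g. a two-byte sequence encoding a
+				// value below 0x80, are rejected here.)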
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go new file mode 100644 index 00000000000..4120e0c9160 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					if true || intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+			}
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return yaml_STR_TAG, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
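+	// (Scan the leading digits and require exactly four, followed by '-'.)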
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/go.yaml.in/yaml/v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
new file mode 100644
index 00000000000..0b9bb6030a0
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually, there are two issues in scanning that might be called "clever";
+// the rest is quite straightforward. The issues are "block collection start"
+// and "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...'
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ?
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
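+	// This also skips line breaks and, in the block context, re-enables
+	// simple keys at the start of a new line (see
+	// yaml_parser_scan_to_next_token below).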
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
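+//
+// A simple key is a key that is not marked with a leading '?', e.g. the
+// "foo" in "foo: bar". Since the scanner only learns that it was a key when
+// the ':' shows up, the candidate position is recorded here and the KEY
+// token is inserted retroactively by yaml_parser_fetch_value.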
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
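+	// ('---' and '...' are three characters long, hence the three skip
+	// calls below.)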
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
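+		//
+		// In the loop below that is the (parser.flow_level > 0 ||
+		// !parser.simple_key_allowed) guard: a tab is eaten as whitespace
+		// only inside a flow collection or where no simple key may start.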
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
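+// Only the names "YAML" and "TAG" are meaningful; any other name is
+// reported as an unknown directive by yaml_parser_scan_directive above.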
+//
+// Scope:
+//      %YAML 1.1 # a comment \n
+//       ^^^^
+//      %TAG !yaml! tag:yaml.org,2002: \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML 1.1 # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML 1.1 # a comment \n
+//            ^
+//      %YAML 1.1 # a comment \n
+//              ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG !yaml! tag:yaml.org,2002: \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all. Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag. Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be a part of
+		// the URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
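+	//
+	// A '%' introduces a URI-escaped octet such as "%21"; those are decoded
+	// by yaml_parser_scan_uri_escapes below, so e.g. the suffix "foo%21bar"
+	// is stored as "foo!bar".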
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&&
+			parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
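+					// Reject surrogate halves (U+D800..U+DFFF) and anything above
+					// U+10FFFF: neither is a valid Unicode scalar value.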
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
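+	// A plain scalar that ended after consuming line breaks leaves the scanner
+	// at the start of a line, where a new simple key may begin.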
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go new file mode 100644 index 00000000000..4c45e660a8f --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go new file mode 100644 index 00000000000..a2dde608cb7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
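+// It reports true without writing anything when the buffer is empty.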
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
new file mode 100644
index 00000000000..5248e1263c5
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/yaml/go-yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
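+	// For fields reached through nested ,inline structs it holds the full
+	// chain of field indexes from the outermost struct inwards.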
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
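+// Implementations of IsZeroer take precedence over the per-kind
+// zero checks performed by isZero below.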
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+	disableLineWrapping = true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
new file mode 100644
index 00000000000..f6a9c8e34b1
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
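+// The value is the id (index) of the item's node within the
+// document's nodes slice.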
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+		multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+		block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+		style yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/go.yaml.in/yaml/v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
new file mode 100644
index 00000000000..8110ce3c37a
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
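+// Note that the index argument is ignored; only the first three
+// bytes of the buffer are examined.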
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 303a65189dd..426a592a851 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -121,7 +121,7 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.38.1 +# github.com/aws/aws-sdk-go-v2 v1.39.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -144,24 +144,24 @@ github.com/aws/aws-sdk-go-v2/internal/sdk github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 +# github.com/aws/aws-sdk-go-v2/service/sts v1.38.3 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.22.5 +# github.com/aws/smithy-go v1.23.0 ## explicit; go 1.22 github.com/aws/smithy-go github.com/aws/smithy-go/auth @@ -451,7 +451,7 @@ github.com/pkg/xattr # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.23.0 +# github.com/prometheus/client_golang v1.23.2 ## explicit; go 1.23.0 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -463,7 +463,7 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.0 +# github.com/prometheus/common v0.66.1 ## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model @@ -481,7 +481,7 @@ github.com/sirupsen/logrus # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.11.0 +# github.com/stretchr/testify v1.11.1 ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml @@ -540,6 +540,9 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore +# go.yaml.in/yaml/v2 v2.4.2 +## explicit; go 1.15 +go.yaml.in/yaml/v2 # golang.org/x/crypto v0.41.0 ## explicit; go 1.23.0 golang.org/x/crypto/argon2 From 
c69e0a31ea4a9c444f6dd3f25a88d2d37a645ba2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 10 Sep 2025 13:55:54 +0200 Subject: [PATCH 060/157] ci: set update_bot_account for queue with Mergify Removed default actions and updated bot account for queue rules. Fixes https://github.com/Mergifyio/mergify/discussions/5142 Signed-off-by: Julien Danjou --- .mergify.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.mergify.yml b/.mergify.yml index d939ac9274d..0f4b2a2f208 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -10,6 +10,7 @@ defaults: queue_rules: - name: default + update_bot_account: ceph-csi-bot merge_conditions: # Conditions to get out of the queue (= merged) - or: From d05758f241215e8d1f18518fa36980685a9ae6fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 21:03:39 +0000 Subject: [PATCH 061/157] rebase: bump the k8s-dependencies group with 2 updates Bumps the k8s-dependencies group with 2 updates: [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) and [k8s.io/utils](https://github.com/kubernetes/utils). Updates `k8s.io/kubernetes` from 1.33.4 to 1.34.0 - [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.33.4...v1.34.0) Updates `k8s.io/utils` from 0.0.0-20241210054802-24370beab758 to 0.0.0-20250604170112-4c0f3b243397 - [Commits](https://github.com/kubernetes/utils/commits) --- updated-dependencies: - dependency-name: k8s.io/kubernetes dependency-version: 1.34.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-dependencies - dependency-name: k8s.io/utils dependency-version: 0.0.0-20250604170112-4c0f3b243397 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-dependencies ... 
Signed-off-by: dependabot[bot] --- go.mod | 24 +- go.sum | 42 +- .../emicklei/go-restful/v3/CHANGES.md | 5 +- .../emicklei/go-restful/v3/README.md | 2 +- .../emicklei/go-restful/v3/jsr311.go | 19 +- .../emicklei/go-restful/v3/route.go | 2 + .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +- .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 3 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 67 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 121 +- vendor/github.com/fsnotify/fsnotify/README.md | 2 - .../fsnotify/fsnotify/backend_fen.go | 401 ++--- .../fsnotify/fsnotify/backend_inotify.go | 735 +++++----- .../fsnotify/fsnotify/backend_kqueue.go | 783 +++++----- .../fsnotify/fsnotify/backend_other.go | 203 +-- .../fsnotify/fsnotify/backend_windows.go | 321 ++-- .../github.com/fsnotify/fsnotify/fsnotify.go | 370 ++++- .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 + .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 + .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 ---- vendor/github.com/fsnotify/fsnotify/shared.go | 64 + .../fsnotify/fsnotify/staticcheck.conf | 3 + .../fsnotify/fsnotify/system_bsd.go | 1 - .../fsnotify/fsnotify/system_darwin.go | 1 - vendor/github.com/fxamacker/cbor/v2/README.md | 607 +++++--- .../fxamacker/cbor/v2/bytestring.go | 27 + vendor/github.com/fxamacker/cbor/v2/cache.go | 25 +- vendor/github.com/fxamacker/cbor/v2/common.go | 9 + vendor/github.com/fxamacker/cbor/v2/decode.go | 425 ++++-- vendor/github.com/fxamacker/cbor/v2/doc.go | 51 +- vendor/github.com/fxamacker/cbor/v2/encode.go | 442 +++++- .../fxamacker/cbor/v2/encode_map.go | 10 +- .../fxamacker/cbor/v2/encode_map_go117.go | 60 - .../fxamacker/cbor/v2/omitzero_go124.go | 8 + .../fxamacker/cbor/v2/omitzero_pre_go124.go | 8 + .../fxamacker/cbor/v2/simplevalue.go | 29 + vendor/github.com/fxamacker/cbor/v2/stream.go | 4 +- .../fxamacker/cbor/v2/structfields.go | 18 +- vendor/github.com/fxamacker/cbor/v2/tag.go | 48 +- .../google/gnostic-models/compiler/context.go | 2 +- .../gnostic-models/compiler/extensions.go | 2 +- .../google/gnostic-models/compiler/helpers.go | 2 +- .../google/gnostic-models/compiler/reader.go | 2 +- .../gnostic-models/jsonschema/models.go | 2 +- .../gnostic-models/jsonschema/reader.go | 2 +- .../gnostic-models/jsonschema/writer.go | 2 +- .../gnostic-models/openapiv2/OpenAPIv2.go | 80 +- .../gnostic-models/openapiv2/document.go | 2 +- .../gnostic-models/openapiv3/OpenAPIv3.go | 24 +- .../gnostic-models/openapiv3/document.go | 2 +- .../modern-go/reflect2/safe_type.go | 22 +- .../fsnotify => spf13/pflag}/.editorconfig | 12 +- vendor/github.com/spf13/pflag/.golangci.yaml | 4 + vendor/github.com/spf13/pflag/flag.go | 29 +- vendor/github.com/spf13/pflag/ip.go | 3 + vendor/github.com/spf13/pflag/ipnet_slice.go | 147 ++ vendor/github.com/spf13/pflag/string_array.go | 4 - .../grpc/otelgrpc/stats_handler.go | 51 +- .../grpc/otelgrpc/version.go | 2 +- .../yaml/v3/LICENSE} 
| 39 +- .../goyaml.v2 => go.yaml.in/yaml/v3}/NOTICE | 0 vendor/go.yaml.in/yaml/v3/README.md | 171 +++ .../goyaml.v2 => go.yaml.in/yaml/v3}/apic.go | 61 +- .../yaml/v3}/decode.go | 647 +++++--- .../yaml/v3}/emitterc.go | 457 +++++- vendor/go.yaml.in/yaml/v3/encode.go | 577 ++++++++ .../yaml/v3}/parserc.go | 305 +++- .../yaml/v3}/readerc.go | 24 +- .../yaml/v3}/resolve.go | 138 +- .../yaml/v3}/scannerc.go | 397 ++++- .../yaml/v3}/sorter.go | 25 +- vendor/go.yaml.in/yaml/v3/writerc.go | 48 + vendor/go.yaml.in/yaml/v3/yaml.go | 703 +++++++++ .../goyaml.v2 => go.yaml.in/yaml/v3}/yamlh.go | 90 +- .../yaml/v3}/yamlprivateh.go | 31 +- .../k8s.io/kube-openapi/pkg/common/common.go | 4 +- .../kube-openapi/pkg/schemaconv/openapi.go | 2 +- .../pkg/schemaconv/proto_models.go | 2 +- .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 2 +- .../kube-openapi/pkg/util/proto/document.go | 2 +- .../pkg/util/proto/document_v3.go | 2 +- .../kubernetes/pkg/features/kube_features.go | 1298 ++++++++++------- .../kubernetes/pkg/kubelet/events/event.go | 1 + .../k8s.io/kubernetes/pkg/volume/plugins.go | 18 +- .../kubernetes/pkg/volume/util/types/types.go | 4 + vendor/k8s.io/kubernetes/pkg/volume/volume.go | 9 + .../kubernetes/pkg/volume/volume_linux.go | 4 +- .../pkg/volume/volume_unsupported.go | 6 +- vendor/k8s.io/utils/buffer/ring_growing.go | 122 +- vendor/modules.txt | 39 +- .../v6}/LICENSE | 0 .../structured-merge-diff/v6/schema/doc.go | 28 + .../v6/schema/elements.go | 375 +++++ .../structured-merge-diff/v6/schema/equals.go | 202 +++ .../v6/schema/schemaschema.go | 165 +++ vendor/sigs.k8s.io/yaml/.travis.yml | 12 - vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 - vendor/sigs.k8s.io/yaml/goyaml.v2/README.md | 200 +-- vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go | 390 ----- vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go | 26 - vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go | 478 ------ .../yaml/goyaml.v2/yaml_aliases.go | 85 ++ vendor/sigs.k8s.io/yaml/yaml.go | 11 +- vendor/sigs.k8s.io/yaml/yaml_go110.go | 31 - 118 files changed, 8762 insertions(+), 4605 deletions(-) delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh create mode 100644 vendor/github.com/fsnotify/fsnotify/shared.go create mode 100644 vendor/github.com/fsnotify/fsnotify/staticcheck.conf delete mode 100644 
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go rename vendor/github.com/{fsnotify/fsnotify => spf13/pflag}/.editorconfig (69%) create mode 100644 vendor/github.com/spf13/pflag/.golangci.yaml create mode 100644 vendor/github.com/spf13/pflag/ipnet_slice.go rename vendor/{sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml => go.yaml.in/yaml/v3/LICENSE} (51%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/NOTICE (100%) create mode 100644 vendor/go.yaml.in/yaml/v3/README.md rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/apic.go (93%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/decode.go (50%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/emitterc.go (76%) create mode 100644 vendor/go.yaml.in/yaml/v3/encode.go rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/parserc.go (81%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/readerc.go (91%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/resolve.go (60%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/scannerc.go (86%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/sorter.go (76%) create mode 100644 vendor/go.yaml.in/yaml/v3/writerc.go create mode 100644 vendor/go.yaml.in/yaml/v3/yaml.go rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/yamlh.go (88%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v3}/yamlprivateh.go (79%) rename vendor/sigs.k8s.io/{yaml/goyaml.v2 => structured-merge-diff/v6}/LICENSE (100%) create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go delete mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go delete mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/go.mod b/go.mod index 9864d18af3b..53c8300613a 100644 --- a/go.mod +++ b/go.mod @@ -39,9 +39,9 @@ require ( k8s.io/apimachinery v0.33.4 k8s.io/cloud-provider v0.33.4 k8s.io/klog/v2 v2.130.1 - k8s.io/kubernetes v1.33.4 + k8s.io/kubernetes v1.34.0 k8s.io/mount-utils v0.33.4 - k8s.io/utils v0.0.0-20241210054802-24370beab758 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 ) require ( @@ -81,10 +81,10 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/gemalto/flume v0.13.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect 
github.com/go-jose/go-jose/v4 v4.1.1 // indirect @@ -96,7 +96,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -121,7 +121,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/selinux v1.11.1 // indirect github.com/openshift/api v0.0.0-20240115183315-0793e918179d // indirect @@ -133,16 +133,17 @@ require ( github.com/prometheus/procfs v0.16.1 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/metric v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect golang.org/x/term v0.34.0 // indirect @@ -159,9 +160,10 @@ require ( k8s.io/component-base v0.33.4 // indirect k8s.io/controller-manager v0.33.4 // indirect k8s.io/csi-translation-lib v0.33.4 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index b7c05ec1670..4cfd805584c 100644 --- a/go.sum +++ b/go.sum @@ -177,8 +177,8 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= -github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -200,10 +200,11 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gemalto/flume v0.13.0 h1:EEeQvAxyFys3BH8IxEU7ZpM6Kr1sYn20HuZq6dgyMR8= github.com/gemalto/flume v0.13.0/go.mod h1:3iOEZiK/HD8SnFTqHCQoOHQKaHlBY0b6z55P8SLaOzk= github.com/gemalto/kmip-go v0.0.10 h1:jAAZejUdRrspKigLoA62MTmIj0T7DDDOzdxHi1cDjoU= @@ -314,8 +315,9 @@ github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl76 github.com/google/fscrypt v0.3.6-0.20240502174735-068b9f8f5dec h1:bXRTgu+1I882EvvYVEUwehBVahYfqeX9Qqb9eUyPs/g= github.com/google/fscrypt v0.3.6-0.20240502174735-068b9f8f5dec/go.mod h1:HyY8Z/kUPrnIKAwuhjrn2tSTM5/s9zfRRTqRMG0mHks= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -506,8 +508,9 @@ github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBL github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg 
v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -621,8 +624,9 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -663,8 +667,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= @@ -692,6 +696,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1388,10 +1394,11 @@ k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi 
v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubernetes v1.33.4 h1:T1d5FLUYm3/KyUeV7YJhKTR980zHCHb7K2xhCSo3lE8= -k8s.io/kubernetes v1.33.4/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w= +k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto= k8s.io/mount-utils v0.33.4 h1:o83Qx0AgY5JDYhFV6gAYfSy+GAPIuPqSKdEqac5lxqo= k8s.io/mount-utils v0.33.4/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1399,8 +1406,8 @@ k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= -k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1417,7 +1424,10 @@ sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxO sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 92b78048e23..6f24dfff562 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,8 @@ # Change history of go-restful +## [v3.12.2] - 2025-02-21 + +- allow 
empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) ## [v3.12.1] - 2024-05-28 @@ -18,7 +21,7 @@ - fix by restoring custom JSON handler functions (Mike Beaumont #540) -## [v3.12.0] - 2023-08-19 +## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 7234604e47b..3fb40d19808 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -3,7 +3,7 @@ go-restful package for building REST-style Web Services using Google Go [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index a9b3faaa81f..7f04bd90533 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma return params } -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { candidates := make([]*Route, 0, 8) for i, each := range routes { @@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) } - if httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept @@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) 
} - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && (length == "" || length == "0") { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } return nil, NewError( http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", "))) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 306c44be779..a2056e2acbb 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool { } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). +// If the route does not specify Consumes then return true (*/*). +// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE. func (r Route) matchesContentType(mimeTypes string) bool { if len(r.Consumes) == 0 { diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3c..7f257e99ac9 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-2 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0a..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b12..daea9dd6d6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575496..6468d2cf400 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,69 @@ # Changelog -Unreleased ----------- -Nothing yet. 
+1.9.0 2025-04-04
+----------------
+
+### Changes and fixes
+
+- all: make BufferedWatcher buffered again ([#657])
+
+- inotify: fix race when adding/removing watches while a watched path is being
+ deleted ([#678], [#686])
+
+- inotify: don't send empty event if a watched path is unmounted ([#655])
+
+- inotify: don't register duplicate watches when watching both a symlink and its
+ target; previously that would get "half-added" and removing the second would
+ panic ([#679])
+
+- kqueue: fix watching relative symlinks ([#681])
+
+- kqueue: correctly mark pre-existing entries when watching a link to a dir on
+ kqueue ([#682])
+
+- illumos: don't send error if changed file is deleted while processing the
+ event ([#678])
+
+
+[#657]: https://github.com/fsnotify/fsnotify/pull/657
+[#678]: https://github.com/fsnotify/fsnotify/pull/678
+[#686]: https://github.com/fsnotify/fsnotify/pull/686
+[#655]: https://github.com/fsnotify/fsnotify/pull/655
+[#681]: https://github.com/fsnotify/fsnotify/pull/681
+[#679]: https://github.com/fsnotify/fsnotify/pull/679
+[#682]: https://github.com/fsnotify/fsnotify/pull/682
+
+1.8.0 2024-10-31
+----------------
+
+### Additions
+
+- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
+
+### Changes and fixes
+
+- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
+
+- kqueue: ignore events with Ident=0 ([#590])
+
+- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
+
+- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
+
+- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
+
+- inotify: fix panic when calling Remove() in a goroutine ([#650])
+
+- fen: allow watching subdirectories of watched directories ([#621])
+
+[#590]: https://github.com/fsnotify/fsnotify/pull/590
+[#610]: https://github.com/fsnotify/fsnotify/pull/610
+[#617]: https://github.com/fsnotify/fsnotify/pull/617
+[#619]: https://github.com/fsnotify/fsnotify/pull/619
+[#620]: https://github.com/fsnotify/fsnotify/pull/620
+[#621]: https://github.com/fsnotify/fsnotify/pull/621
+[#625]: https://github.com/fsnotify/fsnotify/pull/625
+[#650]: https://github.com/fsnotify/fsnotify/pull/650
 1.7.0 - 2023-10-22
 ------------------
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index ea379759d51..4cc40fa597d 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -1,7 +1,7 @@
 Thank you for your interest in contributing to fsnotify! We try to review and
 merge PRs in a reasonable timeframe, but please be aware that:
 
-- To avoid "wasted" work, please discus changes on the issue tracker first. You
+- To avoid "wasted" work, please discuss changes on the issue tracker first. You
   can just send PRs, but they may end up being rejected for one reason or the
   other.
 
@@ -20,6 +20,125 @@ platforms. Testing different platforms locally can be done with something like
 
 Use the `-short` flag to make the "stress test" run faster.
 
+
+Writing new tests
+-----------------
+Scripts in the testdata directory allow creating test cases in a "shell-like"
+syntax. The basic format is:
+
+ script
+
+ Output:
+ desired output
+
+For example:
+
+ # Create a new empty file with some data.
+ watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + print [any strings] # Print text to stdout; for debugging. + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). 
+ [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index e480733d16c..1f4eb583d50 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -15,7 +15,6 @@ Platform support: | ReadDirectoryChangesW | Windows | Supported | | FEN | illumos | Supported | | fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | | FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | | USN Journals | Windows | [Needs support in x/sys/windows][usn] | | Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | @@ -25,7 +24,6 @@ untested. [fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 [usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 Usage ----- diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8e..57fc6928484 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,162 +1,44 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create package fsnotify import ( "errors" "fmt" + "io/fs" "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { + *shared Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} +var defaultBufferSize = 0 -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), - done: make(chan struct{}), +func newBackend(ev chan Event, errs chan error) (backend, error) { + w := &fen{ + shared: newShared(ev, errs), + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), } var err error @@ -169,104 +51,28 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return w, nil } -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { - select { - case w.Events <- Event{Name: name, Op: op}: - return true - case <-w.done: - return false - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { +func (w *fen) Close() error { + if w.shared.close() { return nil } - close(w.done) return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +89,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +100,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +148,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. 
-func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -367,7 +169,7 @@ func (w *Watcher) readEvents() { return } // There was an error not caused by calling w.Close() - if !w.sendError(err) { + if !w.sendError(fmt.Errorf("port.Get: %w", err)) { return } } @@ -382,17 +184,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +222,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -433,13 +237,13 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { isWatched := watchedDir || watchedPath if events&unix.FILE_DELETE != 0 { - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } reRegister = false } if events&unix.FILE_RENAME_FROM != 0 { - if !w.sendEvent(path, Rename) { + if !w.sendEvent(Event{Name: path, Op: Rename}) { return nil } // Don't keep watching the new file name @@ -453,7 +257,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { // inotify reports a Remove event in this case, so we simulate this // here. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't keep watching the file that was removed @@ -487,7 +291,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { // get here, the sudirectory is already gone. Clearly we were watching // this path but now it is gone. Let's tell the user that it was // removed. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Suppress extra write events on removed directories; they are not @@ -502,7 +306,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { if err != nil { // The symlink still exists, but the target is gone. Report the // Remove similar to above. 
- if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't return the error @@ -510,18 +314,12 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { - if !w.sendEvent(path, Write) { + if !w.sendEvent(Event{Name: path, Op: Write}) { return nil } } @@ -529,7 +327,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { if events&unix.FILE_ATTRIB != 0 && stat != nil { // Only send Chmod if perms changed if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { + if !w.sendEvent(Event{Name: path, Op: Chmod}) { return nil } } @@ -538,17 +336,27 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { if stat != nil { // If we get here, it means we've hit an event above that requires us to // continue watching the file or directory - return w.associateFile(path, stat, isWatched) + err := w.associateFile(path, stat, isWatched) + if errors.Is(err, fs.ErrNotExist) { + // Path may have been removed since the stat. + err = nil + } + return err } return nil } -func (w *Watcher) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. If something was removed from the directory, nothing will happen, - // as everything else should still be watched. +// The directory was modified, so we must find unwatched entities and watch +// them. If something was removed from the directory, nothing will happen, as +// everything else should still be watched. +func (w *fen) updateDirectory(path string) error { files, err := os.ReadDir(path) if err != nil { + // Directory no longer exists: probably just deleted since we got the + // event. + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } @@ -563,19 +371,22 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if errors.Is(err, fs.ErrNotExist) { + // File may have disappeared between getting the dir listing and + // adding the port: that's okay to ignore. + continue } - if !w.sendEvent(path, Create) { + if !w.sendError(err) { + return nil + } + if !w.sendEvent(Event{Name: path, Op: Create}) { return nil } } return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +404,42 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { - return err + if err != nil && !errors.Is(err, unix.ENOENT) { + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. 
- events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED + } + if true { + events |= unix.FILE_ATTRIB } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + err := w.port.AssociatePath(path, stat, events, stat.Mode()) + if err != nil { + return fmt.Errorf("port.AssociatePath(%q): %w", path, err) + } + return nil } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } - return w.port.DissociatePath(path) + err := w.port.DissociatePath(path) + if err != nil { + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) + } + return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +457,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e401..a36cb89d736 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,21 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
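The limits described above are also readable at runtime. A minimal sketch (Linux-only; the helper name is an invention for this example) that reads fs.inotify.max_user_watches from the /proc path mentioned above, e.g. to warn before adding a very large number of watches:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// maxUserWatches returns fs.inotify.max_user_watches as exposed in /proc.
func maxUserWatches() (int, error) {
	b, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

func main() {
	n, err := maxUserWatches()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("max_user_watches:", n)
}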
+type inotify struct { + *shared Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -138,21 +28,41 @@ type Watcher struct { fd int inotifyFile *os.File watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( watches struct { - mu sync.RWMutex wd map[uint32]*watch // wd → watch path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -163,57 +73,43 @@ func newWatches() *watches { } } -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) - delete(w.wd, wd) -} - -func (w *watches) removePath(path string) (uint32, bool) { - w.mu.Lock() - defer w.mu.Unlock() +func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] } +func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] } +func (w *watches) len() int { return len(w.wd) } +func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd } +func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) } +func (w *watches) removePath(path string) ([]uint32, error) { + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... 
with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true -} - -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if strings.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - var existing *watch wd, ok := w.path[path] if ok { @@ -236,20 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} +var defaultBufferSize = 0 -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,13 +142,13 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + shared: newShared(ev, errs), + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -271,44 +156,10 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return w, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() - if w.isClosed() { - w.closeMu.Unlock() +func (w *inotify) Close() error { + if w.shared.close() { return nil } - close(w.done) - w.closeMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -317,84 +168,114 @@ func (w *Watcher) Close() error { return err } - // Wait for goroutine to close - <-w.doneResp - + <-w.doneResp // Wait for readEvents() to finish. return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. 
Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + add := func(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) + } + + w.mu.Lock() + defer w.mu.Unlock() + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } + + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + return add(root, with, true) + }) + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + return add(path, with, false) +} - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } + if e, ok := w.watches.wd[uint32(wd)]; ok { + return e, nil + } + if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,87 +285,80 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + w.mu.Lock() + defer w.mu.Unlock() return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. 
+ return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } + w.mu.Lock() + defer w.mu.Unlock() entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() for pathname := range w.watches.path { entries = append(entries, pathname) } - w.watches.mu.RUnlock() - return entries } // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) close(w.Events) }() - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) + var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events for { - // See if we have been closed. if w.isClosed() { return } n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: + if err != nil { + if errors.Is(err, os.ErrClosed) { + return + } if !w.sendError(err) { return } @@ -492,13 +366,9 @@ func (w *Watcher) readEvents() { } if n < unix.SizeofInotifyEvent { - var err error + err := errors.New("notify: short read in readEvents()") // Read was too short. if n == 0 { err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return @@ -506,74 +376,146 @@ func (w *Watcher) readEvents() { continue } + // We don't know how many events we just read into the buffer While the + // offset points to at least one whole event. var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { + // Point to the event in the buffer. + inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 { if !w.sendError(ErrEventOverflow) { return } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - watch := w.watches.byWd(uint32(raw.Wd)) - - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) + ev, ok := w.handleEvent(inEvent, &buf, offset) + if !ok { + return } - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. 
- if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } + if !w.sendEvent(ev) { + return } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + inEvent.Len + } + } +} + +func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) { + w.mu.Lock() + defer w.mu.Unlock() + + /// If the event happened to the watched directory or the watched file, the + /// kernel doesn't append the filename to the event, but we would like to + /// always fill the the "Name" field with a valid filename. We retrieve the + /// path of the watch from the "paths" map. + /// + /// Can be nil if Remove() was called in another goroutine for this path + /// inbetween reading the events from the kernel and reading the internal + /// state. Not much we can do about it, so just skip. See #616. + watch := w.watches.byWd(uint32(inEvent.Wd)) + if watch == nil { + return Event{}, true + } + + var ( + name = watch.path + nameLen = uint32(inEvent.Len) + ) + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bb := *buf + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00") + } + + if debug { + internal.Debug(name, inEvent.Mask, inEvent.Cookie) + } + + if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 { + w.watches.remove(watch) + return Event{}, true + } + + // inotify will automatically remove the watch on deletes; just need + // to clean our state here. + if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + w.watches.remove(watch) + } + + // We can't really update the state when a watched path is moved; only + // IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch. + if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { // Do nothing + return Event{}, true + } + + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return Event{}, false } + } + } + + /// Skip if we're watching both this path and the parent; the parent will + /// already send a delete so no need to do it twice. + if inEvent.Mask&unix.IN_DELETE_SELF != 0 { + _, ok := w.watches.path[filepath.Dir(watch.path)] + if ok { + return Event{}, true + } + } - event := w.newEvent(name, mask) + ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. 
+ if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return Event{}, false + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all the + // children. + // + // TODO: this is of course pretty slow; we should use a better data + // structure for storing all of this, e.g. store children in the + // watch. I have some code for this in my kqueue refactor we can use + // in the future. For now I'm okay with this as it's not publicly + // available. Correctness first, performance second. + if ev.renamedFrom != "" { + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } } } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen } } + + return ev, true +} + +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. + ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +526,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. 
+} + +func (w *inotify) state() { + w.mu.Lock() + defer w.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a07..340aeec061c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,196 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type kqueue struct { + *shared Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. 
+ byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. +func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) bool { + w.mu.Lock() + defer w.mu.Unlock() + + fd, ok := w.path[path] + if !ok { // Already deleted: don't re-set it here. 
+ return false + } + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info + return true +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +var defaultBufferSize = 0 + +func newBackend(ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + shared: newShared(ev, errs), + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + watches: newWatches(), } go w.readEvents() @@ -193,7 +211,7 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // all. func newKqueue() (kq int, closepipe [2]int, err error) { kq, err = unix.Kqueue() - if kq == -1 { + if err != nil { return kq, closepipe, err } @@ -203,6 +221,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -220,167 +240,72 @@ func newKqueue() (kq int, closepipe [2]int, err error) { return kq, closepipe, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + if w.shared.close() { return nil } - w.isClosed = true - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) - close(w.done) - + unix.Close(w.closepipe[1]) // Send "quit" message to readEvents return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. 
The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + _, err := w.addWatch(name, noteAllEvents, false) + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
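Because watches are non-recursive and Remove does not cascade, a caller that added both /tmp/dir and /tmp/dir/subdir must remove both, as the removed documentation above states. A small sketch of that using the public API; the paths are placeholders and must exist for Add to succeed:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watches are per-path: adding the parent does not cover the
	// subdirectory, and removing the parent does not remove it either.
	for _, p := range []string{"/tmp/dir", "/tmp/dir/subdir"} {
		if err := w.Add(p); err != nil {
			log.Fatal(err)
		}
	}
	for _, p := range []string{"/tmp/dir", "/tmp/dir/subdir"} {
		if err := w.Remove(p); err != nil {
			log.Println("remove:", err)
		}
	}
}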
-func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +316,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,114 +330,93 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. 
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + // Follow symlinks, but only for paths added with Add(), and not paths + // we're adding from internalWatch from a listdir. + if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? - return "", nil + return "", err + } + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { - return "", nil + return "", err } } // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } if errors.Is(err, unix.EINTR) { continue } - return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + if !w.watches.updateDirFlags(name, flags) { + return "", nil + } if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { + d := name + if info.linkName != "" { + d = info.linkName + } + if err := w.watchDirectoryFiles(d); err != nil { return "", err } } @@ -534,7 +426,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received kevents into // Event values that it sends down the Events channel. 
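addWatch above retries open(2) when it fails with EINTR instead of surfacing the error. The same loop extracted as a standalone sketch; the helper name, the example path, and the O_RDONLY mode are assumptions (the vendored code uses its own openMode constant):

package main

import (
	"errors"

	"golang.org/x/sys/unix"
)

// openRetryEINTR opens path read-only, retrying if the syscall is
// interrupted; open() can return EINTR in practice on macOS.
func openRetryEINTR(path string) (int, error) {
	for {
		fd, err := unix.Open(path, unix.O_RDONLY, 0)
		if err == nil {
			return fd, nil
		}
		if errors.Is(err, unix.EINTR) {
			continue
		}
		return -1, err
	}
}

func main() {
	fd, err := openRetryEINTR("/etc/hosts") // Hypothetical path.
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
}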
-func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +435,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return + } + + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } - event := w.newEvent(path.name, mask) + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +501,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +538,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +567,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +595,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +605,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,69 +613,62 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + if errors.Is(err, os.ErrNotExist) { + return nil + } + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
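dirChange() and sendCreateIfNew() emulate inotify's create notifications by re-listing the directory on every Write and reporting entries not seen before. The core mechanism, reduced to a standalone sketch — the seen set and emit callback here are illustrative stand-ins for watches.markSeen/seenBefore and the Events channel:

	package main

	import (
		"errors"
		"fmt"
		"os"
		"path/filepath"
	)

	var seen = map[string]struct{}{}

	// dirChange re-lists dir and reports entries that appeared since
	// the last scan; a vanished directory is ignored, as in the backend.
	func dirChange(dir string, emit func(path string)) error {
		files, err := os.ReadDir(dir)
		if err != nil {
			if errors.Is(err, os.ErrNotExist) {
				return nil
			}
			return err
		}
		for _, f := range files {
			p := filepath.Join(dir, f.Name())
			if _, ok := seen[p]; !ok {
				seen[p] = struct{}{}
				emit(p)
			}
		}
		return nil
	}

	func main() {
		_ = dirChange(os.TempDir(), func(p string) { fmt.Println("Create:", p) })
	}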
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true) } - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) + // Watch file to mimic Linux inotify. + return w.addWatch(name, noteAllEvents, true) } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +685,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + //if runtime.GOOS == "freebsd" { + // return true // Supports everything. + //} + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015f..b8c0ad72267 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,22 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). 
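backend_other.go is the catch-all arm of the per-platform build constraints: exactly one file defines newBackend for any given GOOS, and unsupported platforms get a stub that errors at construction time. The pattern in miniature — package and file names here are illustrative, not the patch's own:

	// watch_native.go
	//go:build linux || darwin

	package demo

	func newBackend() (string, error) { return "native backend", nil }

	// watch_other.go
	//go:build !linux && !darwin

	package demo

	import "errors"

	func newBackend() (string, error) {
		return "", errors.New("watching not supported on this platform")
	}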
-// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. 
A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +var defaultBufferSize = 0 + +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d613..3433642d641 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,196 +15,80 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
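The buffer-size caveat in the doc comment above has a concrete escape hatch: AddWith plus the WithBufferSize option defined later in this patch raises the per-watch ReadDirectoryChangesW buffer (a no-op on other platforms). A short usage sketch, with an illustrative path:

	package main

	import (
		"log"

		"github.com/fsnotify/fsnotify"
	)

	func main() {
		w, err := fsnotify.NewWatcher()
		if err != nil {
			log.Fatal(err)
		}
		defer w.Close()

		// 256K instead of the 64K default; a no-op outside Windows.
		err = w.AddWith(`C:\data\incoming`, fsnotify.WithBufferSize(256<<10))
		if err != nil {
			log.Fatal(err)
		}
	}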
+type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error + done chan chan<- error mu sync.Mutex // Protects access to watches, closed watches watchMap // Map of watches (key: i-number) closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) -} +var defaultBufferSize = 50 -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), - quit: make(chan chan<- error, 1), + done: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { - case ch := <-w.quit: - w.quit <- ch + case ch := <-w.done: + w.done <- ch case w.Events <- event: } return true } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true - case <-w.quit: } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -217,66 +97,30 @@ func (w *Watcher) Close() error { w.closed = true w.mu.Unlock() - // Send "quit" message to the reader goroutine + // Send "done" message to the reader goroutine ch := make(chan error) - w.quit <- ch + w.done <- ch if err := w.wakeupReader(); err != nil { return err } return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. 
The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +139,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +160,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +171,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +203,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +259,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +267,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +281,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +324,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +379,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +407,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +419,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +465,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +478,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -652,7 +493,7 @@ func (w *Watcher) readEvents() { watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { select { - case ch := <-w.quit: + case ch := <-w.done: w.mu.Lock() var indexes []indexMap for _, index := range w.watches { @@ -700,7 +541,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +574,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +606,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +633,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +644,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +655,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +670,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4999..f64be4bf98e 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
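One practical note on the FSNOTIFY_DEBUG switch documented above: the debug flag is latched once at package initialization (the var debug = func() bool { ... }() later in this patch), so it must be set in the environment before the process starts — FSNOTIFY_DEBUG=1 ./yourprogram, where the binary name is illustrative — rather than toggled via os.Setenv at runtime.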
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,157 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + //lint:ignore ST1012 not relevant + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event, defaultBufferSize), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event, sz), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// The order is undefined, and may differ per call. Returns nil if +// [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
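With Add, AddWith, Remove, Close, and WatchList now delegating to the backend interface, consumer code is unchanged. The canonical event loop, as a compact sketch with an illustrative watched path:

	package main

	import (
		"log"

		"github.com/fsnotify/fsnotify"
	)

	func main() {
		w, err := fsnotify.NewWatcher()
		if err != nil {
			log.Fatal(err)
		}
		defer w.Close()

		if err := w.Add("/tmp"); err != nil {
			log.Fatal(err)
		}

		for {
			select {
			case ev, ok := <-w.Events:
				if !ok { // channel closed by Close()
					return
				}
				log.Printf("%s %q", ev.Op, ev.Name)
			case err, ok := <-w.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}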
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +359,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +391,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +451,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000000..0b01bc182a1 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000000..928319fb09a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000000..3186b0c3491 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000000..f69fdb930f5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000000..607e683bd73 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000000..35c734be431 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 00000000000..e5b3b6f6943 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000000..1dd455bc5a4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000000..f1b2e73bd5b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000000..52bf4ce53b5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000000..5ac8b507978 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000000..7daa45e19ee --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000000..b251fb80386 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd && !plan9 + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000000..37dfeddc289 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000000..896bc2e5a2f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + ErrSyscallEACCES = errors.New("dummy") + ErrUnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6539..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/shared.go b/vendor/github.com/fsnotify/fsnotify/shared.go new file mode 100644 index 00000000000..3ee9b58f1d2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/shared.go @@ -0,0 +1,64 @@ +package fsnotify + +import "sync" + +type shared struct { + Events chan Event + Errors chan error + done chan struct{} + mu sync.Mutex +} + +func newShared(ev chan Event, errs chan error) *shared { + return &shared{ + Events: ev, + Errors: errs, + done: make(chan struct{}), + } +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *shared) sendEvent(e Event) bool { + if e.Op == 0 { + return true + } + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *shared) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *shared) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Mark as closed; returns true if it was already closed. 
+func (w *shared) close() bool { + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return true + } + close(w.done) + return false +} diff --git a/vendor/github.com/fsnotify/fsnotify/staticcheck.conf b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf new file mode 100644 index 00000000000..8fa7351f0c2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf @@ -0,0 +1,3 @@ +checks = ['all', + '-U1000', # Don't complain about unused functions. +] diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b8855..f65e8fe3edc 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78fe..a29fc7aab62 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md index af0a79507e5..d072b81c730 100644 --- a/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -1,30 +1,31 @@ -# CBOR Codec in Go - - +

CBOR Codec Go logo

[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. -`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). +`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. ## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. 
+ Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. -
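The `encoding/json` parallel mentioned above is easiest to see in a minimal round trip. A sketch (the `Animal` struct is made up for illustration); `Marshal` and `Unmarshal` have the same shapes as their `encoding/json` counterparts:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Animal struct {
	Name string
	Legs int
}

func main() {
	// cbor.Marshal mirrors json.Marshal: value in, []byte out.
	b, err := cbor.Marshal(Animal{Name: "gopher", Legs: 4})
	if err != nil {
		panic(err)
	}

	// cbor.Unmarshal mirrors json.Unmarshal: []byte in, pointer out.
	var a Animal
	if err := cbor.Unmarshal(b, &a); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", a) // {Name:gopher Legs:4}
}
```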
Highlights

+

🔎  Highlights

__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -

Example decoding with encoding/gob 💥 fatal error (out of memory)

- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -


- -
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
Benchmark details

- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -


- -
- -### Smaller Encodings with Struct Tags - -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. - -
Example encoding 3-level nested Go struct to 1 byte CBOR

- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -


- -
- -Example using different struct tags together: +Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data. + +> [!NOTE] +> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`: +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op | +> +> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference. +> +>
🔎  Benchmark details

+> +> Latest comparison for decoding CBOR data to Go `[]byte`: +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores) +> - go test -bench=. -benchmem -count=20 +> +> #### Prior comparisons +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | +> +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.19.6, linux/amd64, i5-13600K (DDR4) +> - go test -bench=. -benchmem -count=20 +> +>

+ +In contrast, some codecs can crash or use excessive resources while decoding bad data. + +> [!WARNING] +> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). +> +>
🔎  gob fatal error (out of memory) 💥 decoding 181 bytes

+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>

+ +### Smaller Encodings with Struct Tag Options + +Struct tags automatically reduce encoded size of structs and improve speed. + +We can write less code by using struct tag options: +- `toarray`: encode without field names (decode back to original struct) +- `keyasint`: encode field names as integers (decode back to original struct) +- `omitempty`: omit empty field when encoding +- `omitzero`: omit zero-value field when encoding + +As a special case, struct field tag "-" omits the field. + +NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field. ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") -API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. +> [!NOTE] +> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte! +> - `encoding/json`: 18 bytes of JSON +> - `fxamacker/cbor`: 1 byte of CBOR +> +>
🔎  Encoding 3-level nested Go struct with omitempty

+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>

+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
🔎  More about tinygo feature branch +> +> ### Tinygo +> +> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go). +> +> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo. +> +> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet. +> +> Changes in this feature branch only affect tinygo compiled software. Summary of changes: +> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33. +> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature. +> - encoding error message can be different when encoding function type. +> +> Related tinygo issues: +> - https://github.com/tinygo-org/tinygo/issues/4277 +> - https://github.com/tinygo-org/tinygo/issues/4458 +> +>
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. + +As a special case, struct field tag "-" omits the field. + +
🔎  Example encoding with struct field tag "-"

+ +https://go.dev/play/p/aWEIFxd7InX + +```Go +// https://github.com/fxamacker/cbor/issues/652 +package main + +import ( + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// The `cbor:"-"` tag omits the Type field when encoding to CBOR. +type Entity struct { + _ struct{} `cbor:",toarray"` + ID uint64 `json:"id"` + Type string `cbor:"-" json:"typeOf"` + Name string `json:"name"` +} + +func main() { + entity := Entity{ + ID: 1, + Type: "int64", + Name: "Identifier", + } + + c, _ := cbor.Marshal(entity) + diag, _ := cbor.Diagnose(c) + fmt.Printf("CBOR in hex: %x\n", c) + fmt.Printf("CBOR in edn: %s\n", diag) + + j, _ := json.Marshal(entity) + fmt.Printf("JSON: %s\n", string(j)) + + fmt.Printf("JSON encoding is %d bytes\n", len(j)) + fmt.Printf("CBOR encoding is %d bytes\n", len(c)) + + // Output: + // CBOR in hex: 82016a4964656e746966696572 + // CBOR in edn: [1, "Identifier"] + // JSON: {"id":1,"typeOf":"int64","name":"Identifier"} + // JSON encoding is 45 bytes + // CBOR encoding is 13 bytes +} +``` + +

-
Example encoding 3-level nested Go struct to 1 byte CBOR

+

🔎  Example encoding 3-level nested Go struct to 1 byte CBOR

https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}

-
Example using several struct tags

+

🔎  Example using struct tag options

![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
Example using TagSet and TagOptions

+

🔎  Example using TagSet and TagOptions

```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ```

+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces. + +Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc. + +The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2). + +
🔎  Example using Embedded JSON Tag for CBOR (tag 262) + +```go +// https://github.com/fxamacker/cbor/issues/657 + +package cbor_test + +// NOTE: RFC 8949 does not mention tag number 262. IANA assigned +// CBOR tag number 262 as "Embedded JSON Object" specified by the +// document Embedded JSON Tag for CBOR: +// +// "Tag 262 can be applied to a byte string (major type 2) to indicate +// that the byte string is a JSON Object. The length of the byte string +// indicates the content." +// +// For more info, see Embedded JSON Tag for CBOR at: +// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// cborTagNumForEmbeddedJSON is the CBOR tag number 262. +const cborTagNumForEmbeddedJSON = 262 + +// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item +// with tag number 262 and the tag content is a JSON object "embedded" as a +// CBOR byte string (major type 2). +type EmbeddedJSON struct { + any +} + +func NewEmbeddedJSON(val any) EmbeddedJSON { + return EmbeddedJSON{val} +} + +// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the +// tag number 262 and the tag content is a JSON object that is +// "embedded" as a CBOR byte string. +func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) { + // Encode v to JSON object. + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // Create cbor.Tag representing a tagged CBOR data item. + tag := cbor.Tag{ + Number: cborTagNumForEmbeddedJSON, + Content: data, + } + + // Marshal to a tagged CBOR data item. + return cbor.Marshal(tag) +} + +// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON. +// The byte slice provided to this function must contain a single +// tagged CBOR data item with the tag number 262 and tag content +// must be a JSON object "embedded" as a CBOR byte string. +func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error { + // Unmarshal tagged CBOR data item. + var tag cbor.Tag + if err := cbor.Unmarshal(b, &tag); err != nil { + return err + } + + // Check tag number. + if tag.Number != cborTagNumForEmbeddedJSON { + return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON) + } + + // Check tag content. + jsonData, isByteString := tag.Content.([]byte) + if !isByteString { + return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content) + } + + // Unmarshal JSON object. + return json.Unmarshal(jsonData, v) +} + +// MarshalJSON encodes EmbeddedJSON to a JSON object. +func (v EmbeddedJSON) MarshalJSON() ([]byte, error) { + return json.Marshal(v.any) +} + +// UnmarshalJSON decodes a JSON object. +func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error { + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + return dec.Decode(&v.any) +} + +func Example_embeddedJSONTagForCBOR() { + value := NewEmbeddedJSON(map[string]any{ + "name": "gopher", + "id": json.Number("42"), + }) + + data, err := cbor.Marshal(value) + if err != nil { + panic(err) + } + + fmt.Printf("cbor: %x\n", data) + + var v EmbeddedJSON + err = cbor.Unmarshal(data, &v) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", v.any) + for k, v := range v.any.(map[string]any) { + fmt.Printf(" %s: %v (%T)\n", k, v, v) + } +} +``` + +
+ + ### Functions and Interfaces -
Functions and interfaces at a glance

+

🔎  Functions and interfaces at a glance

Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed. Other useful functions: - `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. - `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes. -- `Wellformed` returns true if the the CBOR data item is well-formed. +- `Wellformed` returns true if the CBOR data item is well-formed. Interfaces identical or comparable to Go `encoding` packages include: `Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. @@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. +- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. +- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. +- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. + +v2.9.0 passed fuzz tests and is production quality. + +The minimum version of Go required to build: +- v2.8.0 and newer releases require go 1.20+. +- v2.7.1 and older releases require go 1.17+. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. -v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). +[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. 
It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). __IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. @@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. +

CBOR Codec Go logo

[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. -`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). +`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. ## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. 
+ Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. -
Highlights

+

🔎  Highlights

__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -

Example decoding with encoding/gob 💥 fatal error (out of memory)

- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -


- -
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
Benchmark details

- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -


- -
- -### Smaller Encodings with Struct Tags - -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. - -
Example encoding 3-level nested Go struct to 1 byte CBOR

- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -


- -
- -Example using different struct tags together: +Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data. + +> [!NOTE] +> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`: +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op | +> +> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference. +> +>
🔎  Benchmark details

+> +> Latest comparison for decoding CBOR data to Go `[]byte`: +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores) +> - go test -bench=. -benchmem -count=20 +> +> #### Prior comparisons +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | +> +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.19.6, linux/amd64, i5-13600K (DDR4) +> - go test -bench=. -benchmem -count=20 +> +>

+ +In contrast, some codecs can crash or use excessive resources while decoding bad data. + +> [!WARNING] +> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). +> +>
🔎  gob fatal error (out of memory) 💥 decoding 181 bytes

+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>

+ +### Smaller Encodings with Struct Tag Options + +Struct tags automatically reduce encoded size of structs and improve speed. + +We can write less code by using struct tag options: +- `toarray`: encode without field names (decode back to original struct) +- `keyasint`: encode field names as integers (decode back to original struct) +- `omitempty`: omit empty field when encoding +- `omitzero`: omit zero-value field when encoding + +As a special case, struct field tag "-" omits the field. + +NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field. ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") -API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. +> [!NOTE] +> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte! +> - `encoding/json`: 18 bytes of JSON +> - `fxamacker/cbor`: 1 byte of CBOR +> +>
🔎  Encoding 3-level nested Go struct with omitempty

+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>

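+The example above covers `omitempty`; the effect of `toarray` and `keyasint` can be seen with a short sketch (hypothetical `Record` and `Claims` types; tag syntax as listed at the start of this section):
+
+```Go
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+// toarray: fields encode as CBOR array elements, without field names.
+type Record struct {
+	_    struct{} `cbor:",toarray"`
+	ID   uint64
+	Name string
+}
+
+// keyasint: field names encode as the given integer map keys.
+type Claims struct {
+	Issuer string `cbor:"1,keyasint"`
+	Expiry uint64 `cbor:"4,keyasint"`
+}
+
+func main() {
+	r, _ := cbor.Marshal(Record{ID: 7, Name: "a"})
+	c, _ := cbor.Marshal(Claims{Issuer: "x", Expiry: 9})
+	fmt.Println(hex.EncodeToString(r)) // a 2-element array, no field names
+	fmt.Println(hex.EncodeToString(c)) // a map keyed by integers 1 and 4
+}
+```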
+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
🔎  More about tinygo feature branch +> +> ### Tinygo +> +> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go). +> +> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo. +> +> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet. +> +> Changes in this feature branch only affect tinygo compiled software. Summary of changes: +> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33. +> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature. +> - encoding error message can be different when encoding function type. +> +> Related tinygo issues: +> - https://github.com/tinygo-org/tinygo/issues/4277 +> - https://github.com/tinygo-org/tinygo/issues/4458 +> +>
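+With the package installed, the smallest round trip uses only `Marshal` and `Unmarshal` (a minimal sketch with a hypothetical `Animal` type):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+type Animal struct {
+	Name string
+	Legs int
+}
+
+func main() {
+	b, err := cbor.Marshal(Animal{Name: "gopher", Legs: 4}) // struct -> CBOR
+	if err != nil {
+		panic(err)
+	}
+
+	var a Animal
+	if err := cbor.Unmarshal(b, &a); err != nil { // CBOR -> struct
+		panic(err)
+	}
+	fmt.Printf("%+v\n", a) // {Name:gopher Legs:4}
+}
+```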
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. + +As a special case, struct field tag "-" omits the field. + +
🔎  Example encoding with struct field tag "-"

+ +https://go.dev/play/p/aWEIFxd7InX + +```Go +// https://github.com/fxamacker/cbor/issues/652 +package main + +import ( + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// The `cbor:"-"` tag omits the Type field when encoding to CBOR. +type Entity struct { + _ struct{} `cbor:",toarray"` + ID uint64 `json:"id"` + Type string `cbor:"-" json:"typeOf"` + Name string `json:"name"` +} + +func main() { + entity := Entity{ + ID: 1, + Type: "int64", + Name: "Identifier", + } + + c, _ := cbor.Marshal(entity) + diag, _ := cbor.Diagnose(c) + fmt.Printf("CBOR in hex: %x\n", c) + fmt.Printf("CBOR in edn: %s\n", diag) + + j, _ := json.Marshal(entity) + fmt.Printf("JSON: %s\n", string(j)) + + fmt.Printf("JSON encoding is %d bytes\n", len(j)) + fmt.Printf("CBOR encoding is %d bytes\n", len(c)) + + // Output: + // CBOR in hex: 82016a4964656e746966696572 + // CBOR in edn: [1, "Identifier"] + // JSON: {"id":1,"typeOf":"int64","name":"Identifier"} + // JSON encoding is 45 bytes + // CBOR encoding is 13 bytes +} +``` + +

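+The difference between `omitempty` and `omitzero` matters most for struct-typed fields: `omitempty` never omits a struct value, while `omitzero` omits one that equals its type's zero value. A hedged sketch (hypothetical types; `omitzero` behavior as described in the v2.8.0 release notes below):
+
+```Go
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+type Point struct{ X, Y int }
+
+type Shape struct {
+	Label  string `cbor:"label,omitempty"` // omitted when ""
+	Origin Point  `cbor:"origin,omitzero"` // omitted when Origin == Point{}
+}
+
+func main() {
+	b, _ := cbor.Marshal(Shape{})
+	fmt.Println(hex.EncodeToString(b)) // "a0": empty map, both fields omitted
+}
+```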
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p>
+<details><summary>🔎  Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p>
https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}

-<details><summary>Example using several struct tags</summary><p>
+<details><summary>🔎  Example using struct tag options</summary><p>
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
Example using TagSet and TagOptions

+

🔎  Example using TagSet and TagOptions

```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ```

+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces. + +Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc. + +The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2). + +
🔎  Example using Embedded JSON Tag for CBOR (tag 262) + +```go +// https://github.com/fxamacker/cbor/issues/657 + +package cbor_test + +// NOTE: RFC 8949 does not mention tag number 262. IANA assigned +// CBOR tag number 262 as "Embedded JSON Object" specified by the +// document Embedded JSON Tag for CBOR: +// +// "Tag 262 can be applied to a byte string (major type 2) to indicate +// that the byte string is a JSON Object. The length of the byte string +// indicates the content." +// +// For more info, see Embedded JSON Tag for CBOR at: +// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// cborTagNumForEmbeddedJSON is the CBOR tag number 262. +const cborTagNumForEmbeddedJSON = 262 + +// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item +// with tag number 262 and the tag content is a JSON object "embedded" as a +// CBOR byte string (major type 2). +type EmbeddedJSON struct { + any +} + +func NewEmbeddedJSON(val any) EmbeddedJSON { + return EmbeddedJSON{val} +} + +// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the +// tag number 262 and the tag content is a JSON object that is +// "embedded" as a CBOR byte string. +func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) { + // Encode v to JSON object. + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // Create cbor.Tag representing a tagged CBOR data item. + tag := cbor.Tag{ + Number: cborTagNumForEmbeddedJSON, + Content: data, + } + + // Marshal to a tagged CBOR data item. + return cbor.Marshal(tag) +} + +// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON. +// The byte slice provided to this function must contain a single +// tagged CBOR data item with the tag number 262 and tag content +// must be a JSON object "embedded" as a CBOR byte string. +func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error { + // Unmarshal tagged CBOR data item. + var tag cbor.Tag + if err := cbor.Unmarshal(b, &tag); err != nil { + return err + } + + // Check tag number. + if tag.Number != cborTagNumForEmbeddedJSON { + return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON) + } + + // Check tag content. + jsonData, isByteString := tag.Content.([]byte) + if !isByteString { + return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content) + } + + // Unmarshal JSON object. + return json.Unmarshal(jsonData, v) +} + +// MarshalJSON encodes EmbeddedJSON to a JSON object. +func (v EmbeddedJSON) MarshalJSON() ([]byte, error) { + return json.Marshal(v.any) +} + +// UnmarshalJSON decodes a JSON object. +func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error { + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + return dec.Decode(&v.any) +} + +func Example_embeddedJSONTagForCBOR() { + value := NewEmbeddedJSON(map[string]any{ + "name": "gopher", + "id": json.Number("42"), + }) + + data, err := cbor.Marshal(value) + if err != nil { + panic(err) + } + + fmt.Printf("cbor: %x\n", data) + + var v EmbeddedJSON + err = cbor.Unmarshal(data, &v) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", v.any) + for k, v := range v.any.(map[string]any) { + fmt.Printf(" %s: %v (%T)\n", k, v, v) + } +} +``` + +
+ + ### Functions and Interfaces -
Functions and interfaces at a glance

+

🔎  Functions and interfaces at a glance

Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed. Other useful functions: - `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. - `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes. -- `Wellformed` returns true if the the CBOR data item is well-formed. +- `Wellformed` returns true if the CBOR data item is well-formed. Interfaces identical or comparable to Go `encoding` packages include: `Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. @@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. +- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. +- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. +- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. + +v2.9.0 passed fuzz tests and is production quality. + +The minimum version of Go required to build: +- v2.8.0 and newer releases require go 1.20+. +- v2.7.1 and older releases require go 1.17+. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. -v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). +[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. 
It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). __IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. @@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. - -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). - -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
- - diff --git a/e2e/vendor/github.com/Microsoft/go-winio/backup.go b/e2e/vendor/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index b54341daacb..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,287 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "runtime" - "unicode/utf16" - - "github.com/Microsoft/go-winio/internal/fs" - "golang.org/x/sys/windows" -) - -//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId //revive:disable-line:var-naming ID, not Id - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -//nolint:revive // var-naming: ALL_CAPS -const ( - WRITE_DAC = windows.WRITE_DAC - WRITE_OWNER = windows.WRITE_OWNER - ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - //revive:disable-next-line:var-naming ID, not Id - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). -} - -type win32StreamID struct { - StreamID uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. -type BackupStreamReader struct { - r io.Reader - bytesLeft int64 -} - -// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. -func NewBackupStreamReader(r io.Reader) *BackupStreamReader { - return &BackupStreamReader{r, 0} -} - -// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if -// it was not completely read. -func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this - if s, ok := r.r.(io.Seeker); ok { - // Make sure Seek on io.SeekCurrent sometimes succeeds - // before trying the actual seek. - if _, err := s.Seek(0, io.SeekCurrent); err == nil { - if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { - return nil, err - } - r.bytesLeft = 0 - } - } - if _, err := io.Copy(io.Discard, r); err != nil { - return nil, err - } - } - var wsi win32StreamID - if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { - return nil, err - } - hdr := &BackupHeader{ - Id: wsi.StreamID, - Attributes: wsi.Attributes, - Size: int64(wsi.Size), - } - if wsi.NameSize != 0 { - name := make([]uint16, int(wsi.NameSize/2)) - if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { - return nil, err - } - hdr.Name = windows.UTF16ToString(name) - } - if wsi.StreamID == BackupSparseBlock { - if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { - return nil, err - } - hdr.Size -= 8 - } - r.bytesLeft = hdr.Size - return hdr, nil -} - -// Read reads from the current backup stream. 
-func (r *BackupStreamReader) Read(b []byte) (int, error) { - if r.bytesLeft == 0 { - return 0, io.EOF - } - if int64(len(b)) > r.bytesLeft { - b = b[:r.bytesLeft] - } - n, err := r.r.Read(b) - r.bytesLeft -= int64(n) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if r.bytesLeft == 0 && err == nil { - err = io.EOF - } - return n, err -} - -// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. -type BackupStreamWriter struct { - w io.Writer - bytesLeft int64 -} - -// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. -func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { - return &BackupStreamWriter{w, 0} -} - -// WriteHeader writes the next backup stream header and prepares for calls to Write(). -func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { - if w.bytesLeft != 0 { - return fmt.Errorf("missing %d bytes", w.bytesLeft) - } - name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamID{ - StreamID: hdr.Id, - Attributes: hdr.Attributes, - Size: uint64(hdr.Size), - NameSize: uint32(len(name) * 2), - } - if hdr.Id == BackupSparseBlock { - // Include space for the int64 block offset - wsi.Size += 8 - } - if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { - return err - } - if len(name) != 0 { - if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { - return err - } - } - if hdr.Id == BackupSparseBlock { - if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { - return err - } - } - w.bytesLeft = hdr.Size - return nil -} - -// Write writes to the current backup stream. -func (w *BackupStreamWriter) Write(b []byte) (int, error) { - if w.bytesLeft < int64(len(b)) { - return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) - } - n, err := w.w.Write(b) - w.bytesLeft -= int64(n) - return n, err -} - -// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. -type BackupFileReader struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, -// Read will attempt to read the security descriptor of the file. -func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { - r := &BackupFileReader{f, includeSecurity, 0} - return r -} - -// Read reads a backup stream from the file by calling the Win32 API BackupRead(). -func (r *BackupFileReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} - } - runtime.KeepAlive(r.f) - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees Win32 resources associated with the BackupFileReader. It does not close -// the underlying file. -func (r *BackupFileReader) Close() error { - if r.ctx != 0 { - _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) - runtime.KeepAlive(r.f) - r.ctx = 0 - } - return nil -} - -// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. -type BackupFileWriter struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, -// Write() will attempt to restore the security descriptor from the stream. 
-func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { - w := &BackupFileWriter{f, includeSecurity, 0} - return w -} - -// Write restores a portion of the file using the provided backup stream. -func (w *BackupFileWriter) Write(b []byte) (int, error) { - var bytesWritten uint32 - err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} - } - runtime.KeepAlive(w.f) - if int(bytesWritten) != len(b) { - return int(bytesWritten), errors.New("not all bytes could be written") - } - return len(b), nil -} - -// Close frees Win32 resources associated with the BackupFileWriter. It does not -// close the underlying file. -func (w *BackupFileWriter) Close() error { - if w.ctx != 0 { - _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) - runtime.KeepAlive(w.f) - w.ctx = 0 - } - return nil -} - -// OpenForBackup opens a file or directory, potentially skipping access checks if the backup -// or restore privileges have been acquired. -// -// If the file opened was a directory, it cannot be used with Readdir(). -func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - h, err := fs.CreateFile(path, - fs.AccessMask(access), - fs.FileShareMode(share), - nil, - fs.FileCreationDisposition(createmode), - fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT, - 0, - ) - if err != nil { - err = &os.PathError{Op: "open", Path: path, Err: err} - return nil, err - } - return os.NewFile(uintptr(h), path), nil -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/doc.go b/e2e/vendor/github.com/Microsoft/go-winio/doc.go deleted file mode 100644 index 1f5bfe2d548..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// This package provides utilities for efficiently performing Win32 IO operations in Go. -// Currently, this package is provides support for genreal IO and management of -// - named pipes -// - files -// - [Hyper-V sockets] -// -// This code is similar to Go's [net] package, and uses IO completion ports to avoid -// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines. -// -// This limits support to Windows Vista and newer operating systems. 
-// -// Additionally, this package provides support for: -// - creating and managing GUIDs -// - writing to [ETW] -// - opening and manageing VHDs -// - parsing [Windows Image files] -// - auto-generating Win32 API code -// -// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service -// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw- -// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images -package winio diff --git a/e2e/vendor/github.com/Microsoft/go-winio/ea.go b/e2e/vendor/github.com/Microsoft/go-winio/ea.go deleted file mode 100644 index e104dbdfdf9..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/ea.go +++ /dev/null @@ -1,137 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. -type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return ea, nb, err - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return ea, nb, err - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return ea, nb, err -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
-func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return eas, err -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. -func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/file.go b/e2e/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index fe82a180dbd..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,320 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" - - "golang.org/x/sys/windows" -) - -//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (*timeoutError) Error() string { return "i/o timeout" } -func (*timeoutError) Timeout() bool { return true } -func (*timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort windows.Handle - -// ioResult contains the result of an asynchronous IO operation. -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO. 
-type ioOperation struct { - o windows.Overlapped - ch chan ioResult -} - -func initIO() { - h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. -type win32File struct { - handle windows.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomic.Bool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomic.Bool -} - -// makeWin32File makes a new win32File from an existing file handle. -func makeWin32File(h windows.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIO) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -// Deprecated: use NewOpenFile instead. -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return NewOpenFile(windows.Handle(h)) -} - -func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle. -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.Swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - _ = cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - windows.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// IsClosed checks if the file has been closed. -func (f *win32File) IsClosed() bool { - return f.closing.Load() -} - -// prepareIO prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIO() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.Load() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever. -func ioCompletionProcessor(h windows.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// todo: helsaawy - create an asyncIO version that takes a context - -// asyncIO processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. 
-func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno - return int(bytes), err - } - - if f.closing.Load() { - _ = cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.Load() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. - var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - _ = cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.Load() { - return 0, ErrTimeout - } - - var bytes uint32 - err = windows.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno - return 0, io.EOF - } - return n, err -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.Load() { - return 0, ErrTimeout - } - - var bytes uint32 - err = windows.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return windows.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.Store(false) - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.Store(true) - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline is in the past. Cancel all pending IO now. 
- timeoutIO() - } - return nil -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/fileinfo.go b/e2e/vendor/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index c860eb9917a..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,106 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime - FileAttributes uint32 - _ uint32 // padding -} - -// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing -// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 -// alignment is necessary to pass this as FILE_BASIC_INFO. -type alignedFileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 - FileAttributes uint32 - _ uint32 // padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. -func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &alignedFileBasicInfo{} - if err := windows.GetFileInformationByHandleEx( - windows.Handle(f.Fd()), - windows.FileBasicInfo, - (*byte)(unsafe.Pointer(bi)), - uint32(unsafe.Sizeof(*bi)), - ); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the - // public API of this module. The data may be unnecessarily aligned. - return (*FileBasicInfo)(unsafe.Pointer(bi)), nil -} - -// SetFileBasicInfo sets times and attributes for a file. -func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is - // suitable to pass to GetFileInformationByHandleEx. - biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi)) - if err := windows.SetFileInformationByHandle( - windows.Handle(f.Fd()), - windows.FileBasicInfo, - (*byte)(unsafe.Pointer(&biAligned)), - uint32(unsafe.Sizeof(biAligned)), - ); err != nil { - return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return nil -} - -// FileStandardInfo contains extended information for the file. -// FILE_STANDARD_INFO in WinBase.h -// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info -type FileStandardInfo struct { - AllocationSize, EndOfFile int64 - NumberOfLinks uint32 - DeletePending, Directory bool -} - -// GetFileStandardInfo retrieves ended information for the file. -func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { - si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), - windows.FileStandardInfo, - (*byte)(unsafe.Pointer(si)), - uint32(unsafe.Sizeof(*si))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return si, nil -} - -// FileIDInfo contains the volume serial number and file ID for a file. This pair should be -// unique on a system. -type FileIDInfo struct { - VolumeSerialNumber uint64 - FileID [16]byte -} - -// GetFileID retrieves the unique (volume, file ID) pair for a file. 
-func GetFileID(f *os.File) (*FileIDInfo, error) { - fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx( - windows.Handle(f.Fd()), - windows.FileIdInfo, - (*byte)(unsafe.Pointer(fileID)), - uint32(unsafe.Sizeof(*fileID)), - ); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return fileID, nil -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/hvsock.go b/e2e/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index c4fdd9d4aec..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,582 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "time" - "unsafe" - - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/socket" - "github.com/Microsoft/go-winio/pkg/guid" -) - -const afHVSock = 34 // AF_HYPERV - -// Well known Service and VM IDs -// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards - -// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. -func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 - return guid.GUID{} -} - -// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. -func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff - return guid.GUID{ - Data1: 0xffffffff, - Data2: 0xffff, - Data3: 0xffff, - Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - } -} - -// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. -func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 - return guid.GUID{ - Data1: 0xe0e16197, - Data2: 0xdd56, - Data3: 0x4a10, - Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, - } -} - -// HvsockGUIDSiloHost is the address of a silo's host partition: -// - The silo host of a hosted silo is the utility VM. -// - The silo host of a silo on a physical host is the physical host. -func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 - return guid.GUID{ - Data1: 0x36bd0c5c, - Data2: 0x7276, - Data3: 0x4223, - Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, - } -} - -// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. -func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd - return guid.GUID{ - Data1: 0x90db8b89, - Data2: 0xd35, - Data3: 0x4f79, - Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, - } -} - -// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. -// Listening on this VmId accepts connection from: -// - Inside silos: silo host partition. -// - Inside hosted silo: host of the VM. -// - Inside VM: VM host. -// - Physical host: Not supported. -func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 - return guid.GUID{ - Data1: 0xa42e7cda, - Data2: 0xd03f, - Data3: 0x480c, - Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, - } -} - -// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. 
-func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 - return guid.GUID{ - Data2: 0xfacb, - Data3: 0x11e6, - Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, - } -} - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -var _ socket.RawSockaddr = &rawHvsockAddr{} - -// Network returns the address's network name, "hvsock". -func (*HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. -func VsockServiceID(port uint32) guid.GUID { - g := hvsockVsockServiceTemplate() // make a copy - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHVSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// Sockaddr returns a pointer to and the size of this struct. -// -// Implements the [socket.RawSockaddr] interface, and allows use in -// [socket.Bind] and [socket.ConnectEx]. -func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { - return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil -} - -// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. -func (r *rawHvsockAddr) FromBytes(b []byte) error { - n := int(unsafe.Sizeof(rawHvsockAddr{})) - - if len(b) < n { - return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) - } - - copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) - if r.Family != afHVSock { - return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) - } - - return nil -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -var _ net.Listener = &HvsockListener{} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -var _ net.Conn = &HvsockConn{} - -func newHVSocket() (*win32File, error) { - fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - windows.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. -func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - - var sock *win32File - sock, err = newHVSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - defer func() { - if err != nil { - _ = sock.Close() - } - }() - - sa := addr.raw() - err = socket.Bind(sock.handle, &sa) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = windows.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. 
-func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHVSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIO() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - // - // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) - if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - - conn := &HvsockConn{ - sock: sock, - } - // The local address returned in the AcceptEx buffer is the same as the Listener socket's - // address. However, the service GUID reported by GetSockName is different from the Listeners - // socket, and is sometimes the same as the local address of the socket that dialed the - // address, with the service GUID.Data1 incremented, but othertimes is different. - // todo: does the local address matter? is the listener's address or the actual address appropriate? - conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - - // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(sock.handle, - windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, - (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { - return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) - } - - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. -func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). -type HvsockDialer struct { - // Deadline is the time the Dial operation must connect before erroring. - Deadline time.Time - - // Retries is the number of additional connects to try if the connection times out, is refused, - // or the host is unreachable - Retries uint - - // RetryWait is the time to wait after a connection error to retry - RetryWait time.Duration - - rt *time.Timer // redial wait timer -} - -// Dial the Hyper-V socket at addr. -// -// See [HvsockDialer.Dial] for more information. -func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - return (&HvsockDialer{}).Dial(ctx, addr) -} - -// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. -// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between -// retries. -// -// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. 
-func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - op := "dial" - // create the conn early to use opErr() - conn = &HvsockConn{ - remote: *addr, - } - - if !d.Deadline.IsZero() { - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, d.Deadline) - defer cancel() - } - - // preemptive timeout/cancellation check - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - sock, err := newHVSocket() - if err != nil { - return nil, conn.opErr(op, err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - - sa := addr.raw() - err = socket.Bind(sock.handle, &sa) - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("bind", err)) - } - - c, err := sock.prepareIO() - if err != nil { - return nil, conn.opErr(op, err) - } - defer sock.wg.Done() - var bytes uint32 - for i := uint(0); i <= d.Retries; i++ { - err = socket.ConnectEx( - sock.handle, - &sa, - nil, // sendBuf - 0, // sendDataLen - &bytes, - (*windows.Overlapped)(unsafe.Pointer(&c.o))) - _, err = sock.asyncIO(c, nil, bytes, err) - if i < d.Retries && canRedial(err) { - if err = d.redialWait(ctx); err == nil { - continue - } - } - break - } - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) - } - - // update the connection properties, so shutdown can be used - if err = windows.Setsockopt( - sock.handle, - windows.SOL_SOCKET, - windows.SO_UPDATE_CONNECT_CONTEXT, - nil, // optvalue - 0, // optlen - ); err != nil { - return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) - } - - // get the local name - var sal rawHvsockAddr - err = socket.GetSockName(sock.handle, &sal) - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) - } - conn.local.fromRaw(&sal) - - // one last check for timeout, since asyncIO doesn't check the context - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - conn.sock = sock - sock = nil - - return conn, nil -} - -// redialWait waits before attempting to redial, resetting the timer as appropriate. -func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { - if d.RetryWait == 0 { - return nil - } - - if d.rt == nil { - d.rt = time.NewTimer(d.RetryWait) - } else { - // should already be stopped and drained - d.rt.Reset(d.RetryWait) - } - - select { - case <-ctx.Done(): - case <-d.rt.C: - return nil - } - - // stop and drain the timer - if !d.rt.Stop() { - <-d.rt.C - } - return ctx.Err() -} - -// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. 
-func canRedial(err error) bool { - //nolint:errorlint // guaranteed to be an Errno - switch err { - case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, - windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: - return true - default: - return false - } -} - -func (conn *HvsockConn) opErr(op string, err error) error { - // translate from "file closed" to "socket closed" - if errors.Is(err, ErrFileClosed) { - err = socket.ErrSocketClosed - } - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsarecv", eno) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsasend", eno) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) IsClosed() bool { - return conn.sock.IsClosed() -} - -// shutdown disables sending or receiving on a socket. -func (conn *HvsockConn) shutdown(how int) error { - if conn.IsClosed() { - return socket.ErrSocketClosed - } - - err := windows.Shutdown(conn.sock.handle, how) - if err != nil { - // If the connection was closed, shutdowns fail with "not connected" - if errors.Is(err, windows.WSAENOTCONN) || - errors.Is(err, windows.WSAESHUTDOWN) { - err = socket.ErrSocketClosed - } - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket, preventing future read operations. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(windows.SHUT_RD) - if err != nil { - return conn.opErr("closeread", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, preventing future write operations and -// notifying the other endpoint that no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(windows.SHUT_WR) - if err != nil { - return conn.opErr("closewrite", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. 
-func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. -func (conn *HvsockConn) SetDeadline(t time.Time) error { - // todo: implement `SetDeadline` for `win32File` - if err := conn.SetReadDeadline(t); err != nil { - return fmt.Errorf("set read deadline: %w", err) - } - if err := conn.SetWriteDeadline(t); err != nil { - return fmt.Errorf("set write deadline: %w", err) - } - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go deleted file mode 100644 index 1f653881783..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// This package contains Win32 filesystem functionality. -package fs diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go deleted file mode 100644 index 0cd9621df78..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ /dev/null @@ -1,262 +0,0 @@ -//go:build windows - -package fs - -import ( - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/stringbuffer" -) - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go - -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW - -const NullHandle windows.Handle = 0 - -// AccessMask defines standard, specific, and generic rights. -// -// Used with CreateFile and NtCreateFile (and co.). -// -// Bitmask: -// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 -// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 -// +---------------+---------------+-------------------------------+ -// |G|G|G|G|Resvd|A| StandardRights| SpecificRights | -// |R|W|E|A| |S| | | -// +-+-------------+---------------+-------------------------------+ -// -// GR Generic Read -// GW Generic Write -// GE Generic Exectue -// GA Generic All -// Resvd Reserved -// AS Access Security System -// -// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask -// -// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights -// -// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants -type AccessMask = windows.ACCESS_MASK - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // Not actually any. 
- // - // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" - // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters - FILE_ANY_ACCESS AccessMask = 0 - - GENERIC_READ AccessMask = 0x8000_0000 - GENERIC_WRITE AccessMask = 0x4000_0000 - GENERIC_EXECUTE AccessMask = 0x2000_0000 - GENERIC_ALL AccessMask = 0x1000_0000 - ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 - - // Specific Object Access - // from ntioapi.h - - FILE_READ_DATA AccessMask = (0x0001) // file & pipe - FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory - - FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe - FILE_ADD_FILE AccessMask = (0x0002) // directory - - FILE_APPEND_DATA AccessMask = (0x0004) // file - FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory - FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe - - FILE_READ_EA AccessMask = (0x0008) // file & directory - FILE_READ_PROPERTIES AccessMask = FILE_READ_EA - - FILE_WRITE_EA AccessMask = (0x0010) // file & directory - FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA - - FILE_EXECUTE AccessMask = (0x0020) // file - FILE_TRAVERSE AccessMask = (0x0020) // directory - - FILE_DELETE_CHILD AccessMask = (0x0040) // directory - - FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all - - FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all - - FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) - FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) - FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) - FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) - - SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF - - // Standard Access - // from ntseapi.h - - DELETE AccessMask = 0x0001_0000 - READ_CONTROL AccessMask = 0x0002_0000 - WRITE_DAC AccessMask = 0x0004_0000 - WRITE_OWNER AccessMask = 0x0008_0000 - SYNCHRONIZE AccessMask = 0x0010_0000 - - STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 - - STANDARD_RIGHTS_READ AccessMask = READ_CONTROL - STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL - STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL - - STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 -) - -type FileShareMode uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - FILE_SHARE_NONE FileShareMode = 0x00 - FILE_SHARE_READ FileShareMode = 0x01 - FILE_SHARE_WRITE FileShareMode = 0x02 - FILE_SHARE_DELETE FileShareMode = 0x04 - FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 -) - -type FileCreationDisposition uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // from winbase.h - - CREATE_NEW FileCreationDisposition = 0x01 - CREATE_ALWAYS FileCreationDisposition = 0x02 - OPEN_EXISTING FileCreationDisposition = 0x03 - OPEN_ALWAYS FileCreationDisposition = 0x04 - TRUNCATE_EXISTING FileCreationDisposition = 0x05 -) - -// Create disposition values for NtCreate* -type NTFileCreationDisposition uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( - // From ntioapi.h - - FILE_SUPERSEDE NTFileCreationDisposition = 0x00 - FILE_OPEN NTFileCreationDisposition = 0x01 - FILE_CREATE NTFileCreationDisposition = 0x02 - FILE_OPEN_IF NTFileCreationDisposition = 0x03 - FILE_OVERWRITE NTFileCreationDisposition = 0x04 - FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 - FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 -) - -// CreateFile and co. take flags or attributes together as one parameter. -// Define alias until we can use generics to allow both -// -// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants -type FileFlagOrAttribute uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // from winnt.h - - FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 - FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 - FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 - FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 - FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 - FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 - FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 - FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 - FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 - FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 - FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 -) - -// NtCreate* functions take a dedicated CreateOptions parameter. -// -// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile -// -// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file -type NTCreateOptions uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // From ntioapi.h - - FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 - FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 - FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 - FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 - - FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 - FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 - FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 - FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 - - FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 - FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 - FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 - FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 - - FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 - FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 - FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 - FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 -) - -type FileSQSFlag = FileFlagOrAttribute - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( - // from winbase.h - - SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) - SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) - SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) - SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - - SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 -) - -// GetFinalPathNameByHandle flags -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters -type GetFinalPathFlag uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 - - FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 - FILE_NAME_OPENED GetFinalPathFlag = 0x8 - - VOLUME_NAME_DOS GetFinalPathFlag = 0x0 - VOLUME_NAME_GUID GetFinalPathFlag = 0x1 - VOLUME_NAME_NT GetFinalPathFlag = 0x2 - VOLUME_NAME_NONE GetFinalPathFlag = 0x4 -) - -// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle -// with the given handle and flags. It transparently takes care of creating a buffer of the -// correct size for the call. -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew -func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { - b := stringbuffer.NewWString() - //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? - for { - n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) - if err != nil { - return "", err - } - // If the buffer wasn't large enough, n will be the total size needed (including null terminator). - // Resize and try again. - if n > b.Cap() { - b.ResizeTo(n) - continue - } - // If the buffer is large enough, n will be the size not including the null terminator. - // Convert to a Go string and return. - return b.String(), nil - } -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/security.go deleted file mode 100644 index 81760ac67e9..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/security.go +++ /dev/null @@ -1,12 +0,0 @@ -package fs - -// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level -type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` - -// Impersonation levels -const ( - SecurityAnonymous SecurityImpersonationLevel = 0 - SecurityIdentification SecurityImpersonationLevel = 1 - SecurityImpersonation SecurityImpersonationLevel = 2 - SecurityDelegation SecurityImpersonationLevel = 3 -) diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go deleted file mode 100644 index a94e234c706..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package fs - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. 
-const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procCreateFileW = modkernel32.NewProc("CreateFileW") -) - -func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = errnoErr(e1) - } - return -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go deleted file mode 100644 index 7e82f9afa95..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go +++ /dev/null @@ -1,20 +0,0 @@ -package socket - -import ( - "unsafe" -) - -// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The -// struct must meet the Win32 sockaddr requirements specified here: -// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 -// -// Specifically, the struct size must be least larger than an int16 (unsigned short) -// for the address family. -type RawSockaddr interface { - // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing - // for the RawSockaddr's data to be overwritten by syscalls (if necessary). - // - // It is the callers responsibility to validate that the values are valid; invalid - // pointers or size can cause a panic. 
- Sockaddr() (unsafe.Pointer, int32, error) -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go deleted file mode 100644 index 88580d974ec..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ /dev/null @@ -1,177 +0,0 @@ -//go:build windows - -package socket - -import ( - "errors" - "fmt" - "net" - "sync" - "syscall" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" -) - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go - -//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname -//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername -//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind - -const socketError = uintptr(^uint32(0)) - -var ( - // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? - - ErrBufferSize = errors.New("buffer size") - ErrAddrFamily = errors.New("address family") - ErrInvalidPointer = errors.New("invalid pointer") - ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) -) - -// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) - -// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. -// If rsa is not large enough, the [windows.WSAEFAULT] is returned. -func GetSockName(s windows.Handle, rsa RawSockaddr) error { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - // although getsockname returns WSAEFAULT if the buffer is too small, it does not set - // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy - return getsockname(s, ptr, &l) -} - -// GetPeerName returns the remote address the socket is connected to. -// -// See [GetSockName] for more information. 
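// A minimal RawSockaddr implementation, assuming a caller that only needs the
// raw address bytes: wrapping windows.RawSockaddrAny satisfies the size
// requirement for any address family, so it can be passed to GetSockName
// above (or GetPeerName, which follows). Real callers such as rawHvsockAddr
// validate the address family after the call.
package socket

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

type anySockaddr struct {
	addr windows.RawSockaddrAny
}

func (a *anySockaddr) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(&a.addr), int32(unsafe.Sizeof(a.addr)), nil
}

// localAddr reads the local address bytes of socket s.
func localAddr(s windows.Handle) (windows.RawSockaddrAny, error) {
	var a anySockaddr
	err := GetSockName(s, &a)
	return a.addr, err
}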
-func GetPeerName(s windows.Handle, rsa RawSockaddr) error { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - return getpeername(s, ptr, &l) -} - -func Bind(s windows.Handle, rsa RawSockaddr) (err error) { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - return bind(s, ptr, l) -} - -// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the -// their sockaddr interface, so they cannot be used with HvsockAddr -// Replicate functionality here from -// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go - -// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at -// runtime via a WSAIoctl call: -// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks - -type runtimeFunc struct { - id guid.GUID - once sync.Once - addr uintptr - err error -} - -func (f *runtimeFunc) Load() error { - f.once.Do(func() { - var s windows.Handle - s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) - if f.err != nil { - return - } - defer windows.CloseHandle(s) //nolint:errcheck - - var n uint32 - f.err = windows.WSAIoctl(s, - windows.SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&f.id)), - uint32(unsafe.Sizeof(f.id)), - (*byte)(unsafe.Pointer(&f.addr)), - uint32(unsafe.Sizeof(f.addr)), - &n, - nil, // overlapped - 0, // completionRoutine - ) - }) - return f.err -} - -var ( - // todo: add `AcceptEx` and `GetAcceptExSockaddrs` - WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS - Data1: 0x25a207b9, - Data2: 0xddf3, - Data3: 0x4660, - Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, - } - - connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} -) - -func ConnectEx( - fd windows.Handle, - rsa RawSockaddr, - sendBuf *byte, - sendDataLen uint32, - bytesSent *uint32, - overlapped *windows.Overlapped, -) error { - if err := connectExFunc.Load(); err != nil { - return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) - } - ptr, n, err := rsa.Sockaddr() - if err != nil { - return err - } - return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) -} - -// BOOL LpfnConnectex( -// [in] SOCKET s, -// [in] const sockaddr *name, -// [in] int namelen, -// [in, optional] PVOID lpSendBuffer, -// [in] DWORD dwSendDataLength, -// [out] LPDWORD lpdwBytesSent, -// [in] LPOVERLAPPED lpOverlapped -// ) - -func connectEx( - s windows.Handle, - name unsafe.Pointer, - namelen int32, - sendBuf *byte, - sendDataLen uint32, - bytesSent *uint32, - overlapped *windows.Overlapped, -) (err error) { - r1, _, e1 := syscall.SyscallN(connectExFunc.addr, - uintptr(s), - uintptr(name), - uintptr(namelen), - uintptr(unsafe.Pointer(sendBuf)), - uintptr(sendDataLen), - uintptr(unsafe.Pointer(bytesSent)), - uintptr(unsafe.Pointer(overlapped)), - ) - - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return err -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go deleted file mode 100644 index e1504126aa6..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using 
"github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package socket - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procbind = modws2_32.NewProc("bind") - procgetpeername = modws2_32.NewProc("getpeername") - procgetsockname = modws2_32.NewProc("getsockname") -) - -func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} - -func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) - if r1 == socketError { - err = errnoErr(e1) - } - return -} - -func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/e2e/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go deleted file mode 100644 index 42ebc019fcb..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go +++ /dev/null @@ -1,132 +0,0 @@ -package stringbuffer - -import ( - "sync" - "unicode/utf16" -) - -// TODO: worth exporting and using in mkwinsyscall? - -// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate -// large path strings: -// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. -const MinWStringCap = 310 - -// use *[]uint16 since []uint16 creates an extra allocation where the slice header -// is copied to heap and then referenced via pointer in the interface header that sync.Pool -// stores. -var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly - New: func() interface{} { - b := make([]uint16, MinWStringCap) - return &b - }, -} - -func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } - -// freeBuffer copies the slice header data, and puts a pointer to that in the pool. -// This avoids taking a pointer to the slice header in WString, which can be set to nil. -func freeBuffer(b []uint16) { pathPool.Put(&b) } - -// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings -// for interacting with Win32 APIs. -// Sizes are specified as uint32 and not int. -// -// It is not thread safe. -type WString struct { - // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. - - // raw buffer - b []uint16 -} - -// NewWString returns a [WString] allocated from a shared pool with an -// initial capacity of at least [MinWStringCap]. 
-// Since the buffer may have been previously used, its contents are not guaranteed to be empty. -// -// The buffer should be freed via [WString.Free] -func NewWString() *WString { - return &WString{ - b: newBuffer(), - } -} - -func (b *WString) Free() { - if b.empty() { - return - } - freeBuffer(b.b) - b.b = nil -} - -// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the -// previous buffer back into pool. -func (b *WString) ResizeTo(c uint32) uint32 { - // already sufficient (or n is 0) - if c <= b.Cap() { - return b.Cap() - } - - if c <= MinWStringCap { - c = MinWStringCap - } - // allocate at-least double buffer size, as is done in [bytes.Buffer] and other places - if c <= 2*b.Cap() { - c = 2 * b.Cap() - } - - b2 := make([]uint16, c) - if !b.empty() { - copy(b2, b.b) - freeBuffer(b.b) - } - b.b = b2 - return c -} - -// Buffer returns the underlying []uint16 buffer. -func (b *WString) Buffer() []uint16 { - if b.empty() { - return nil - } - return b.b -} - -// Pointer returns a pointer to the first uint16 in the buffer. -// If the [WString.Free] has already been called, the pointer will be nil. -func (b *WString) Pointer() *uint16 { - if b.empty() { - return nil - } - return &b.b[0] -} - -// String returns the returns the UTF-8 encoding of the UTF-16 string in the buffer. -// -// It assumes that the data is null-terminated. -func (b *WString) String() string { - // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" - // and would make this code Windows-only, which makes no sense. - // So copy UTF16ToString code into here. - // If other windows-specific code is added, switch to [windows.UTF16ToString] - - s := b.b - for i, v := range s { - if v == 0 { - s = s[:i] - break - } - } - return string(utf16.Decode(s)) -} - -// Cap returns the underlying buffer capacity. 
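// A minimal sketch of the grow-and-retry idiom WString supports, assuming an
// in-repo caller (the package is internal) and using GetModuleFileNameW as an
// example Win32 call; it mirrors the GetFinalPathNameByHandle loop in
// internal/fs.
package stringbuffer

import "golang.org/x/sys/windows"

func moduleFileName() (string, error) {
	b := NewWString()
	defer b.Free()
	for {
		n, err := windows.GetModuleFileName(0, b.Pointer(), b.Cap())
		if err != nil {
			return "", err
		}
		// A result that fills the whole buffer indicates truncation: grow and retry.
		if n >= b.Cap() {
			b.ResizeTo(2 * b.Cap())
			continue
		}
		return b.String(), nil
	}
}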
-func (b *WString) Cap() uint32 { - if b.empty() { - return 0 - } - return b.cap() -} - -func (b *WString) cap() uint32 { return uint32(cap(b.b)) } -func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/e2e/vendor/github.com/Microsoft/go-winio/pipe.go b/e2e/vendor/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index a2da6639d00..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,586 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "runtime" - "time" - "unsafe" - - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/fs" -) - -//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW -//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe -//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl - -type PipeConn interface { - net.Conn - Disconnect() error - Flush() error -} - -// type aliases for mkwinsyscall code -type ( - ntAccessMask = fs.AccessMask - ntFileShareMode = fs.FileShareMode - ntFileCreationDisposition = fs.NTFileCreationDisposition - ntFileOptions = fs.NTCreateOptions -) - -type ioStatusBlock struct { - Status, Information uintptr -} - -// typedef struct _OBJECT_ATTRIBUTES { -// ULONG Length; -// HANDLE RootDirectory; -// PUNICODE_STRING ObjectName; -// ULONG Attributes; -// PVOID SecurityDescriptor; -// PVOID SecurityQualityOfService; -// } OBJECT_ATTRIBUTES; -// -// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *unicodeString - Attributes uintptr - SecurityDescriptor *securityDescriptor - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -// typedef struct _SECURITY_DESCRIPTOR { -// BYTE Revision; -// BYTE Sbz1; -// SECURITY_DESCRIPTOR_CONTROL Control; -// PSID Owner; -// PSID Group; -// PACL Sacl; -// PACL Dacl; -// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; -// -// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor -type 
securityDescriptor struct { - Revision byte - Sbz1 byte - Control uint16 - Owner uintptr - Group uintptr - Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl - Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl -} - -type ntStatus int32 - -func (status ntStatus) Err() error { - if status >= 0 { - return nil - } - return rtlNtStatusToDosError(status) -} - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - ErrPipeListenerClosed = net.ErrClosed - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -var _ PipeConn = (*win32Pipe)(nil) - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - if err := f.SetReadDeadline(t); err != nil { - return err - } - return f.SetWriteDeadline(t) -} - -func (f *win32Pipe) Disconnect() error { - return disconnectNamedPipe(f.win32File.handle) -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - err := f.win32File.Flush() - if err != nil { - return err - } - _, err = f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). -func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { //nolint:errorlint - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. - f.readEOF = true - } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno - // ERROR_MORE_DATA indicates that the pipe's read mode is message mode - // and the message still has more bytes. Treat this as a success, since - // this package presents all named pipes as byte streams. - err = nil - } - return n, err -} - -func (pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
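// A minimal end-to-end sketch of the message-mode EOF contract above, using
// ListenPipe and DialPipe from later in this file; the pipe name is an
// arbitrary example. MessageMode must be set on the listener so the zero-byte
// CloseWrite message reaches the reader as io.EOF.
package main

import (
	"fmt"
	"io"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	l, err := winio.ListenPipe(`\\.\pipe\winio-demo`, &winio.PipeConfig{MessageMode: true})
	if err != nil {
		panic(err)
	}
	defer l.Close()

	go func() {
		c, err := winio.DialPipe(`\\.\pipe\winio-demo`, nil) // nil timeout: 2s default
		if err != nil {
			return
		}
		defer c.Close()
		_, _ = c.Write([]byte("hello"))
		// Zero-byte message: the reader's io.ReadAll below returns at io.EOF.
		_ = c.(interface{ CloseWrite() error }).CloseWrite()
	}()

	s, err := l.Accept()
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(s)
	fmt.Printf("got %q\n", b)
}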
-func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { - for { - select { - case <-ctx.Done(): - return windows.Handle(0), ctx.Err() - default: - h, err := fs.CreateFile(*path, - access, - 0, // mode - nil, // security attributes - fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), - 0, // template file handle - ) - if err == nil { - return h, nil - } - if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno - return h, &os.PathError{Err: err, Op: "open", Path: *path} - } - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. - time.Sleep(10 * time.Millisecond) - } - } -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) -func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } else { - absTimeout = time.Now().Add(2 * time.Second) - } - ctx, cancel := context.WithDeadline(context.Background(), absTimeout) - defer cancel() - conn, err := DialPipeContext(ctx, path) - if errors.Is(err, context.DeadlineExceeded) { - return nil, ErrTimeout - } - return conn, err -} - -// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` -// cancellation or timeout. -func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) -} - -// PipeImpLevel is an enumeration of impersonation levels that may be set -// when calling DialPipeAccessImpersonation. -type PipeImpLevel uint32 - -const ( - PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) - PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) - PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) - PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) -) - -// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` -// cancellation or timeout. -func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { - return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) -} - -// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with -// `access` at `impLevel` until `ctx` cancellation or timeout. The other -// DialPipe* implementations use PipeImpLevelAnonymous. -func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { - var err error - var h windows.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) - if err != nil { - return nil, err - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - f, err := makeWin32File(h) - if err != nil { - windows.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). 
- if flags&windows.PIPE_TYPE_MESSAGE != 0 { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: f, path: path}, - }, nil - } - return &win32Pipe{win32File: f, path: path}, nil -} - -type acceptResponse struct { - f *win32File - err error -} - -type win32PipeListener struct { - firstHandle windows.Handle - path string - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int -} - -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { - path16, err := windows.UTF16FromString(path) - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - var oa objectAttributes - oa.Length = unsafe.Sizeof(oa) - - var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], - &ntPath, - 0, - 0, - ).Err(); err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck - oa.ObjectName = &ntPath - oa.Attributes = windows.OBJ_CASE_INSENSITIVE - - // The security descriptor is only needed for the first pipe. - if first { - if sd != nil { - //todo: does `sdb` need to be allocated on the heap, or can go allocate it? - l := uint32(len(sd)) - sdb, err := windows.LocalAlloc(0, l) - if err != nil { - return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err) - } - defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck - copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) - oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) - } else { - // Construct the default named pipe security descriptor. - var dacl uintptr - if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %w", err) - } - defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck - - sdb := &securityDescriptor{ - Revision: 1, - Control: windows.SE_DACL_PRESENT, - Dacl: dacl, - } - oa.SecurityDescriptor = sdb - } - } - - typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) - if c.MessageMode { - typ |= windows.FILE_PIPE_MESSAGE_TYPE - } - - disposition := fs.FILE_OPEN - access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE - if first { - disposition = fs.FILE_CREATE - // By not asking for read or write access, the named pipe file system - // will put this pipe into an initially disconnected state, blocking - // client connections until the next call with first == false. - access = fs.SYNCHRONIZE - } - - timeout := int64(-50 * 10000) // 50ms - - var ( - h windows.Handle - iosb ioStatusBlock - ) - err = ntCreateNamedPipeFile(&h, - access, - &oa, - &iosb, - fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, - disposition, - 0, - typ, - 0, - 0, - 0xffffffff, - uint32(c.InputBufferSize), - uint32(c.OutputBufferSize), - &timeout).Err() - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - runtime.KeepAlive(ntPath) - return h, nil -} - -func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, nil, &l.config, false) - if err != nil { - return nil, err - } - f, err := makeWin32File(h) - if err != nil { - windows.Close(h) - return nil, err - } - return f, nil -} - -func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { - p, err := l.makeServerPipe() - if err != nil { - return nil, err - } - - // Wait for the client to connect. 
- ch := make(chan error) - go func(p *win32File) { - ch <- connectPipe(p) - }(p) - - select { - case err = <-ch: - if err != nil { - p.Close() - p = nil - } - case <-l.closeCh: - // Abort the connect request by closing the handle. - p.Close() - p = nil - err = <-ch - if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno - err = ErrPipeListenerClosed - } - } - return p, err -} - -func (l *win32PipeListener) listenerRoutine() { - closed := false - for !closed { - select { - case <-l.closeCh: - closed = true - case responseCh := <-l.acceptCh: - var ( - p *win32File - err error - ) - for { - p, err = l.makeConnectedServerPipe() - // If the connection was immediately closed by the client, try - // again. - if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno - break - } - } - responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno - } - } - windows.Close(l.firstHandle) - l.firstHandle = 0 - // Notify Close() and Accept() callers that the handle has been closed. - close(l.doneCh) -} - -// PipeConfig contain configuration for the pipe listener. -type PipeConfig struct { - // SecurityDescriptor contains a Windows security descriptor in SDDL format. - SecurityDescriptor string - - // MessageMode determines whether the pipe is in byte or message mode. In either - // case the pipe is read in byte mode by default. The only practical difference in - // this implementation is that CloseWrite() is only supported for message mode pipes; - // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only - // transferred to the reader (and returned as io.EOF in this implementation) - // when the pipe is in message mode. - MessageMode bool - - // InputBufferSize specifies the size of the input buffer, in bytes. - InputBufferSize int32 - - // OutputBufferSize specifies the size of the output buffer, in bytes. - OutputBufferSize int32 -} - -// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. -// The pipe must not already exist. 
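// A minimal server sketch around ListenPipe, which follows, assuming an
// arbitrary pipe name and an example SDDL string (full access for SYSTEM and
// Administrators); the SDDL is converted by SddlToSecurityDescriptor when the
// listener is created.
package pipedemo

import (
	"net"

	winio "github.com/Microsoft/go-winio"
)

func serve(handle func(net.Conn)) error {
	cfg := &winio.PipeConfig{
		SecurityDescriptor: "D:P(A;;GA;;;SY)(A;;GA;;;BA)", // example policy only
		InputBufferSize:    64 * 1024,
		OutputBufferSize:   64 * 1024,
	}
	l, err := winio.ListenPipe(`\\.\pipe\winio-serve`, cfg)
	if err != nil {
		return err
	}
	defer l.Close()
	for {
		c, err := l.Accept()
		if err != nil {
			return err // ErrPipeListenerClosed once Close() has been called
		}
		go handle(c)
	}
}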
-func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - l := &win32PipeListener{ - firstHandle: h, - path: path, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIO() - if err != nil { - return err - } - defer p.wg.Done() - - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIO(c, nil, 0, err) - if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err != nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index 48ce4e92436..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,232 +0,0 @@ -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" //nolint:gosec // not used for secure application - "encoding" - "encoding/binary" - "fmt" - "strconv" -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122 section 4.1.1. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 // RFC 4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. -type Version uint8 - -func (v Version) String() string { - return strconv.FormatUint(uint64(v), 10) -} - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. 
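// A minimal sketch of the two GUID constructors that follow (NewV4/NewV5).
// The namespace used here is the RFC 4122 DNS namespace, chosen purely as an
// example.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	g4, err := guid.NewV4() // random
	if err != nil {
		panic(err)
	}
	ns, _ := guid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	g5, _ := guid.NewV5(ns, []byte("example.org")) // deterministic per (namespace, name)
	fmt.Println(g4, g5)                            // String() prints xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
}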
-func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. - g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() //nolint:gosec // not used for secure application - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. - g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. 
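// A minimal sketch showing that FromString, which follows, together with the
// TextMarshaler/TextUnmarshaler implementations further below, makes GUID
// usable directly with encoding/json; the GUID literal is an arbitrary
// example value.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

type record struct {
	ID guid.GUID `json:"id"`
}

func main() {
	var r record
	if err := json.Unmarshal([]byte(`{"id":"c2ccb5a6-7a10-4c52-9171-74c2ccd22e22"}`), &r); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(r)
	fmt.Println(r.ID, string(out))
}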
-func FromString(s string) (GUID, error) { - if len(s) != 36 { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - - var g GUID - - data1, err := strconv.ParseUint(s[0:8], 16, 32) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data1 = uint32(data1) - - data2, err := strconv.ParseUint(s[9:13], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data2 = uint16(data2) - - data3, err := strconv.ParseUint(s[14:18], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data3 = uint16(data3) - - for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { - v, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data4[i] = uint8(v) - } - - return g, nil -} - -func (g *GUID) setVariant(v Variant) { - d := g.Data4[0] - switch v { - case VariantNCS: - d = (d & 0x7f) - case VariantRFC4122: - d = (d & 0x3f) | 0x80 - case VariantMicrosoft: - d = (d & 0x1f) | 0xc0 - case VariantFuture: - d = (d & 0x0f) | 0xe0 - case VariantUnknown: - fallthrough - default: - panic(fmt.Sprintf("invalid variant: %d", v)) - } - g.Data4[0] = d -} - -// Variant returns the GUID variant, as defined in RFC 4122. -func (g GUID) Variant() Variant { - b := g.Data4[0] - if b&0x80 == 0 { - return VariantNCS - } else if b&0xc0 == 0x80 { - return VariantRFC4122 - } else if b&0xe0 == 0xc0 { - return VariantMicrosoft - } else if b&0xe0 == 0xe0 { - return VariantFuture - } - return VariantUnknown -} - -func (g *GUID) setVersion(v Version) { - g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) -} - -// Version returns the GUID version, as defined in RFC 4122. -func (g GUID) Version() Version { - return Version((g.Data3 & 0xF000) >> 12) -} - -// MarshalText returns the textual representation of the GUID. -func (g GUID) MarshalText() ([]byte, error) { - return []byte(g.String()), nil -} - -// UnmarshalText takes the textual representation of a GUID, and unmarhals it -// into this GUID. -func (g *GUID) UnmarshalText(text []byte) error { - g2, err := FromString(string(text)) - if err != nil { - return err - } - *g = g2 - return nil -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go deleted file mode 100644 index 805bd354842..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows -// +build !windows - -package guid - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type as that is only available to builds -// targeted at `windows`. The representation matches that used by native Windows -// code. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go deleted file mode 100644 index 27e45ee5ccf..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows -// +build windows - -package guid - -import "golang.org/x/sys/windows" - -// GUID represents a GUID/UUID. 
It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID diff --git a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go deleted file mode 100644 index 4076d3132fd..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. - -package guid - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[VariantUnknown-0] - _ = x[VariantNCS-1] - _ = x[VariantRFC4122-2] - _ = x[VariantMicrosoft-3] - _ = x[VariantFuture-4] -} - -const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" - -var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} - -func (i Variant) String() string { - if i >= Variant(len(_Variant_index)-1) { - return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/privilege.go b/e2e/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index d9b90b6e861..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,196 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h windows.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - //revive:disable-next-line:var-naming ALL_CAPS - SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - - //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" - SeSecurityPrivilege = "SeSecurityPrivilege" -) - -var ( - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. 
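// A minimal sketch of RunWithPrivilege, defined later in this file, assuming
// a backup-style file open as the work item; SeBackupPrivilege is what lets a
// FILE_FLAG_BACKUP_SEMANTICS open succeed regardless of the file's ACL. The
// path handling is illustrative only.
package backupdemo

import (
	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func openForBackup(path string) (windows.Handle, error) {
	var h windows.Handle
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		p, err := windows.UTF16PtrFromString(path)
		if err != nil {
			return err
		}
		var cerr error
		// The privilege is enabled only on this thread, only for this call.
		h, cerr = windows.CreateFile(p, windows.GENERIC_READ,
			windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING,
			windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
		return cerr
	})
	return h, err
}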
-type PrivilegeError struct { - privileges []uint64 -} - -func (e *PrivilegeError) Error() string { - s := "Could not enable privilege " - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -// RunWithPrivilege enables a single privilege for a function call. -func RunWithPrivilege(name string, fn func() error) error { - return RunWithPrivileges([]string{name}, fn) -} - -// RunWithPrivileges enables privileges for a function call. -func RunWithPrivileges(names []string, fn func() error) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - token, err := newThreadToken() - if err != nil { - return err - } - defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) - if err != nil { - return err - } - return fn() -} - -func mapPrivileges(names []string) ([]uint64, error) { - privileges := make([]uint64, 0, len(names)) - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableProcessPrivileges enables privileges globally for the process. -func EnableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) -} - -// DisableProcessPrivileges disables privileges globally for the process. -func DisableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, 0) -} - -func enableDisableProcessPrivilege(names []string, action uint32) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - - p := windows.CurrentProcess() - var token windows.Token - err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) - if err != nil { - return err - } - - defer token.Close() - return adjustPrivileges(token, privileges, action) -} - -func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) - for _, p := range privileges { - _ = binary.Write(&b, binary.LittleEndian, p) - _ = binary.Write(&b, binary.LittleEndian, action) - } - prevState := make([]byte, b.Len()) - reqSize := uint32(0) - success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno - return &PrivilegeError{privileges} - } - return nil -} - -func getPrivilegeName(luid uint64) string { - var nameBuffer [256]uint16 - bufSize := uint32(len(nameBuffer)) - err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) - if err != nil { - return fmt.Sprintf("", luid) - } - - var displayNameBuffer [256]uint16 - displayBufSize := uint32(len(displayNameBuffer)) - var langID uint32 - err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) - if err != nil { - return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) - } - - return string(utf16.Decode(displayNameBuffer[:displayBufSize])) -} - -func newThreadToken() (windows.Token, error) { - err := 
impersonateSelf(windows.SecurityImpersonation) - if err != nil { - return 0, err - } - - var token windows.Token - err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) - if err != nil { - rerr := revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/reparse.go b/e2e/vendor/github.com/Microsoft/go-winio/reparse.go deleted file mode 100644 index 67d1a104a63..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/reparse.go +++ /dev/null @@ -1,131 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "strings" - "unicode/utf16" - "unsafe" -) - -const ( - reparseTagMountPoint = 0xA0000003 - reparseTagSymlink = 0xA000000C -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 -} - -// ReparsePoint describes a Win32 symlink or mount point. -type ReparsePoint struct { - Target string - IsMountPoint bool -} - -// UnsupportedReparsePointError is returned when trying to decode a non-symlink or -// mount point reparse point. -type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. -func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - _ = binary.Write(&b, binary.LittleEndian, flags) - } - - _ = binary.Write(&b, binary.LittleEndian, ntTarget16) - _ = binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/sd.go b/e2e/vendor/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index c3685e98e14..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,133 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "fmt" - "unsafe" - - "golang.org/x/sys/windows" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch { - case errors.Is(e.Err, windows.ERROR_INVALID_SID): - s = "the security ID structure is invalid" - case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -func (e *AccountLookupError) Unwrap() error { return e.Err } - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -func (e *SddlConversionError) Unwrap() error { return e.Err } - -// LookupSidByName looks up the SID of an account by name -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} - } - - var sidSize, sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = 
convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", &AccountLookupError{name, err} - } - sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) - return sid, nil -} - -// LookupNameBySid looks up the name of an account by SID -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupNameBySid(sid string) (name string, err error) { - if sid == "" { - return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} - } - - sidBuffer, err := windows.UTF16PtrFromString(sid) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - var sidPtr *byte - if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { - return "", &AccountLookupError{sid, err} - } - defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck - - var nameSize, refDomainSize, sidNameUse uint32 - err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{sid, err} - } - - nameBuffer := make([]uint16, nameSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - name = windows.UTF16ToString(nameBuffer) - return name, nil -} - -func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - sd, err := windows.SecurityDescriptorFromString(sddl) - if err != nil { - return nil, &SddlConversionError{Sddl: sddl, Err: err} - } - b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) - return b, nil -} - -func SecurityDescriptorToSddl(sd []byte) (string, error) { - if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { - return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) - } - s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) - return s.String(), nil -} diff --git a/e2e/vendor/github.com/Microsoft/go-winio/syscall.go b/e2e/vendor/github.com/Microsoft/go-winio/syscall.go deleted file mode 100644 index a6ca111b39c..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/syscall.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build windows - -package winio - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/e2e/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/e2e/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go deleted file mode 100644 index 89b66eda8cc..00000000000 --- a/e2e/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ /dev/null @@ -1,378 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package winio - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. 
-func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") -) - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) - if r1 == 0 { - err = errnoErr(e1) - 
} - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) 
(err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { - r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { - r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) - newport = windows.Handle(r0) - if newport == 0 { - err = errnoErr(e1) - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func disconnectNamedPipe(pipe windows.Handle) (err error) { - r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getCurrentThread() (h windows.Handle) { - r0, _, 
_ := syscall.SyscallN(procGetCurrentThread.Addr()) - h = windows.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) - status = ntStatus(r0) - return -} - -func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) - status = ntStatus(r0) - return -} - -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) - status = ntStatus(r0) - return -} - -func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) - if r1 == 0 { - err = 
errnoErr(e1) - } - return -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/LICENSE b/e2e/vendor/github.com/containerd/containerd/api/LICENSE deleted file mode 100644 index 584149b6ee2..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. 
- - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go deleted file mode 100644 index aab9e45b126..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go +++ /dev/null @@ -1,1178 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto - -package containers - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Container struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID is the user-specified identifier. - // - // This field may not be updated. - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Labels provides an area to include arbitrary data on containers. - // - // The combined size of a key/value pair cannot exceed 4096 bytes. - // - // Note that to add a new value to this field, read the existing set and - // include the entire result in the update call. - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Image contains the reference of the image used to build the - // specification and snapshots for running this container. - // - // If this field is updated, the spec and rootfs needed to updated, as well. - Image string `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"` - // Runtime specifies which runtime to use for executing this container. - Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime,proto3" json:"runtime,omitempty"` - // Spec to be used when creating the container. This is runtime specific. - Spec *anypb.Any `protobuf:"bytes,5,opt,name=spec,proto3" json:"spec,omitempty"` - // Snapshotter specifies the snapshotter name used for rootfs - Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - // SnapshotKey specifies the snapshot key to use for the container's root - // filesystem. 
When starting a task from this container, a caller should - // look up the mounts from the snapshot service and include those on the - // task create request. - // - // Snapshots referenced in this field will not be garbage collected. - // - // This field is set to empty when the rootfs is not a snapshot. - // - // This field may be updated. - SnapshotKey string `protobuf:"bytes,7,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` - // CreatedAt is the time the container was first created. - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - // UpdatedAt is the last time the container was mutated. - UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` - // Extensions allow clients to provide zero or more blobs that are directly - // associated with the container. One may provide protobuf, json, or other - // encoding formats. The primary use of this is to further decorate the - // container object with fields that may be specific to a client integration. - // - // The key portion of this map should identify a "name" for the extension - // that should be unique against other extensions. When updating extension - // data, one should only update the specified extension using field paths - // to select a specific map key. - Extensions map[string]*anypb.Any `protobuf:"bytes,10,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Sandbox ID this container belongs to. - Sandbox string `protobuf:"bytes,11,opt,name=sandbox,proto3" json:"sandbox,omitempty"` -} - -func (x *Container) Reset() { - *x = Container{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Container) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Container) ProtoMessage() {} - -func (x *Container) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Container.ProtoReflect.Descriptor instead. 
-func (*Container) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{0} -} - -func (x *Container) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *Container) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Container) GetImage() string { - if x != nil { - return x.Image - } - return "" -} - -func (x *Container) GetRuntime() *Container_Runtime { - if x != nil { - return x.Runtime - } - return nil -} - -func (x *Container) GetSpec() *anypb.Any { - if x != nil { - return x.Spec - } - return nil -} - -func (x *Container) GetSnapshotter() string { - if x != nil { - return x.Snapshotter - } - return "" -} - -func (x *Container) GetSnapshotKey() string { - if x != nil { - return x.SnapshotKey - } - return "" -} - -func (x *Container) GetCreatedAt() *timestamppb.Timestamp { - if x != nil { - return x.CreatedAt - } - return nil -} - -func (x *Container) GetUpdatedAt() *timestamppb.Timestamp { - if x != nil { - return x.UpdatedAt - } - return nil -} - -func (x *Container) GetExtensions() map[string]*anypb.Any { - if x != nil { - return x.Extensions - } - return nil -} - -func (x *Container) GetSandbox() string { - if x != nil { - return x.Sandbox - } - return "" -} - -type GetContainerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *GetContainerRequest) Reset() { - *x = GetContainerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetContainerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetContainerRequest) ProtoMessage() {} - -func (x *GetContainerRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetContainerRequest.ProtoReflect.Descriptor instead. 
-func (*GetContainerRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{1} -} - -func (x *GetContainerRequest) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -type GetContainerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` -} - -func (x *GetContainerResponse) Reset() { - *x = GetContainerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetContainerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetContainerResponse) ProtoMessage() {} - -func (x *GetContainerResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetContainerResponse.ProtoReflect.Descriptor instead. -func (*GetContainerResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{2} -} - -func (x *GetContainerResponse) GetContainer() *Container { - if x != nil { - return x.Container - } - return nil -} - -type ListContainersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Filters contains one or more filters using the syntax defined in the - // containerd filter package. - // - // The returned result will be those that match any of the provided - // filters. Expanded, containers that match the following will be - // returned: - // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] - // - // If filters is zero-length or nil, all items will be returned. - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` -} - -func (x *ListContainersRequest) Reset() { - *x = ListContainersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListContainersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListContainersRequest) ProtoMessage() {} - -func (x *ListContainersRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListContainersRequest.ProtoReflect.Descriptor instead. 
-func (*ListContainersRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{3} -} - -func (x *ListContainersRequest) GetFilters() []string { - if x != nil { - return x.Filters - } - return nil -} - -type ListContainersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Containers []*Container `protobuf:"bytes,1,rep,name=containers,proto3" json:"containers,omitempty"` -} - -func (x *ListContainersResponse) Reset() { - *x = ListContainersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListContainersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListContainersResponse) ProtoMessage() {} - -func (x *ListContainersResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListContainersResponse.ProtoReflect.Descriptor instead. -func (*ListContainersResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{4} -} - -func (x *ListContainersResponse) GetContainers() []*Container { - if x != nil { - return x.Containers - } - return nil -} - -type CreateContainerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` -} - -func (x *CreateContainerRequest) Reset() { - *x = CreateContainerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateContainerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateContainerRequest) ProtoMessage() {} - -func (x *CreateContainerRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateContainerRequest.ProtoReflect.Descriptor instead. 
-func (*CreateContainerRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{5} -} - -func (x *CreateContainerRequest) GetContainer() *Container { - if x != nil { - return x.Container - } - return nil -} - -type CreateContainerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` -} - -func (x *CreateContainerResponse) Reset() { - *x = CreateContainerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateContainerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateContainerResponse) ProtoMessage() {} - -func (x *CreateContainerResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateContainerResponse.ProtoReflect.Descriptor instead. -func (*CreateContainerResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{6} -} - -func (x *CreateContainerResponse) GetContainer() *Container { - if x != nil { - return x.Container - } - return nil -} - -// UpdateContainerRequest updates the metadata on one or more container. -// -// The operation should follow semantics described in -// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, -// unless otherwise qualified. -type UpdateContainerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Container provides the target values, as declared by the mask, for the update. - // - // The ID field must be set. - Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` - // UpdateMask specifies which fields to perform the update on. If empty, - // the operation applies to all fields. 
[... remainder of the machine-generated containers.pb.go bindings (request/response message types, raw wire-format descriptor bytes, and type-registration code) elided ...]
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto b/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
deleted file mode 100644
index 3de07ffbd6e..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
-  Copyright The containerd Authors.
-
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-*/
-
-syntax = "proto3";
-
-package containerd.services.containers.v1;
-
-import "google/protobuf/any.proto";
-import "google/protobuf/empty.proto";
-import "google/protobuf/field_mask.proto";
-import "google/protobuf/timestamp.proto";
-
-option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";
-
-// Containers provides metadata storage for containers used in the execution
-// service.
-//
-// The objects here provide a state-independent view of containers for use in
-// management and resource pinning. From that perspective, containers do not
-// have a "state" but rather this is the set of resources that will be
-// considered in use by the container.
-//
-// From the perspective of the execution service, these objects represent the
-// base parameters for creating a container process.
-//
-// In general, when looking to add fields for this type, first ask yourself
-// whether or not the function of the field has to do with runtime execution or
-// is invariant of the runtime state of the container. If it has to do with
-// runtime, or changes as the "container" is started and stops, it probably
-// doesn't belong on this object.
-service Containers {
-  rpc Get(GetContainerRequest) returns (GetContainerResponse);
-  rpc List(ListContainersRequest) returns (ListContainersResponse);
-  rpc ListStream(ListContainersRequest) returns (stream ListContainerMessage);
-  rpc Create(CreateContainerRequest) returns (CreateContainerResponse);
-  rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);
-  rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);
-}
-
-message Container {
-  // ID is the user-specified identifier.
-  //
-  // This field may not be updated.
-  string id = 1;
-
-  // Labels provides an area to include arbitrary data on containers.
-  //
-  // The combined size of a key/value pair cannot exceed 4096 bytes.
-  //
-  // Note that to add a new value to this field, read the existing set and
-  // include the entire result in the update call.
-  map<string, string> labels = 2;
-
-  // Image contains the reference of the image used to build the
-  // specification and snapshots for running this container.
-  //
-  // If this field is updated, the spec and rootfs need to be updated as well.
-  string image = 3;
-
-  message Runtime {
-    // Name is the name of the runtime.
-    string name = 1;
-    // Options specify additional runtime initialization options.
-    google.protobuf.Any options = 2;
-  }
-  // Runtime specifies which runtime to use for executing this container.
-  Runtime runtime = 4;
-
-  // Spec to be used when creating the container. This is runtime specific.
-  google.protobuf.Any spec = 5;
-
-  // Snapshotter specifies the snapshotter name used for rootfs.
-  string snapshotter = 6;
-
-  // SnapshotKey specifies the snapshot key to use for the container's root
-  // filesystem. When starting a task from this container, a caller should
-  // look up the mounts from the snapshot service and include those on the
-  // task create request.
-  //
-  // Snapshots referenced in this field will not be garbage collected.
-  //
-  // This field is set to empty when the rootfs is not a snapshot.
-  //
-  // This field may be updated.
-  string snapshot_key = 7;
-
-  // CreatedAt is the time the container was first created.
-  google.protobuf.Timestamp created_at = 8;
-
-  // UpdatedAt is the last time the container was mutated.
-  google.protobuf.Timestamp updated_at = 9;
-
-  // Extensions allow clients to provide zero or more blobs that are directly
-  // associated with the container. One may provide protobuf, json, or other
-  // encoding formats. The primary use of this is to further decorate the
-  // container object with fields that may be specific to a client integration.
-  //
-  // The key portion of this map should identify a "name" for the extension
-  // that should be unique against other extensions. When updating extension
-  // data, one should only update the specified extension using field paths
-  // to select a specific map key.
-  map<string, google.protobuf.Any> extensions = 10;
-
-  // Sandbox ID this container belongs to.
-  string sandbox = 11;
-}
-
-message GetContainerRequest {
-  string id = 1;
-}
-
-message GetContainerResponse {
-  Container container = 1;
-}
-
-message ListContainersRequest {
-  // Filters contains one or more filters using the syntax defined in the
-  // containerd filter package.
-  //
-  // The returned result will be those that match any of the provided
-  // filters. Expanded, containers that match the following will be
-  // returned:
-  //
-  //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
-  //
-  // If filters is zero-length or nil, all items will be returned.
-  repeated string filters = 1;
-}
-
-message ListContainersResponse {
-  repeated Container containers = 1;
-}
-
-message CreateContainerRequest {
-  Container container = 1;
-}
-
-message CreateContainerResponse {
-  Container container = 1;
-}
-
-// UpdateContainerRequest updates the metadata on one or more containers.
-//
-// The operation should follow semantics described in
-// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
-// unless otherwise qualified.
-message UpdateContainerRequest {
-  // Container provides the target values, as declared by the mask, for the update.
-  //
-  // The ID field must be set.
-  Container container = 1;
-
-  // UpdateMask specifies which fields to perform the update on. If empty,
-  // the operation applies to all fields.
-  google.protobuf.FieldMask update_mask = 2;
-}
-
-message UpdateContainerResponse {
-  Container container = 1;
-}
-
-message DeleteContainerRequest {
-  string id = 1;
-}
-
-message ListContainerMessage {
-  Container container = 1;
-}
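The proto above is the authoritative definition that the generated Go stubs removed below merely wrap. As an illustrative sketch only, not part of this patch: a minimal Go client that uses the generated Containers bindings to update just a container's labels, relying on the FieldMask semantics documented in UpdateContainerRequest. The socket path, namespace, and container ID are assumptions for the sketch.

package main

import (
	"context"
	"log"

	containers "github.com/containerd/containerd/api/services/containers/v1"
	"github.com/containerd/containerd/namespaces"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// Socket path is an assumption; containerd usually listens on a unix socket.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// containerd requires a namespace on the request context.
	ctx := namespaces.WithNamespace(context.Background(), "default")
	client := containers.NewContainersClient(conn)

	// Per the UpdateContainerRequest doc comment, the mask selects the
	// fields to change; everything outside "labels" is left untouched.
	_, err = client.Update(ctx, &containers.UpdateContainerRequest{
		Container: &containers.Container{
			ID:     "example-container", // hypothetical container ID
			Labels: map[string]string{"env": "test"},
		},
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"labels"}},
	})
	if err != nil {
		log.Fatalf("update: %v", err)
	}
}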
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go
deleted file mode 100644
index 93dab77d10d..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go
+++ /dev/null
@@ -1,316 +0,0 @@
[... 316 lines of machine-generated gRPC client/server stubs for the Containers service elided ...]
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_ttrpc.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_ttrpc.pb.go
deleted file mode 100644
index 8090011df33..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_ttrpc.pb.go
+++ /dev/null
@@ -1,174 +0,0 @@
[... 174 lines of machine-generated ttrpc client/server stubs for the Containers service elided ...]
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/doc.go b/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/doc.go
deleted file mode 100644
index a6ef491ce5f..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
[... 17-line doc.go: Apache-2.0 license header plus the "package containers" clause, elided ...]
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/doc.go b/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/doc.go
deleted file mode 100644
index 0888ba8a85f..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
[... 17-line doc.go: Apache-2.0 license header plus the "package tasks" clause, elided ...]
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
deleted file mode 100644
index 1a55d696dd5..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
+++ /dev/null
@@ -1,2359 +0,0 @@
[... 2,359 lines of machine-generated protobuf Go bindings for the containerd Tasks service (CreateTaskRequest, StartRequest, and related message types) elided ...]
-func (*StartResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{3} -} - -func (x *StartResponse) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -type DeleteTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` -} - -func (x *DeleteTaskRequest) Reset() { - *x = DeleteTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteTaskRequest) ProtoMessage() {} - -func (x *DeleteTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteTaskRequest.ProtoReflect.Descriptor instead. -func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{4} -} - -func (x *DeleteTaskRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -type DeleteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` -} - -func (x *DeleteResponse) Reset() { - *x = DeleteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteResponse) ProtoMessage() {} - -func (x *DeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. 
-func (*DeleteResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{5} -} - -func (x *DeleteResponse) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *DeleteResponse) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -func (x *DeleteResponse) GetExitStatus() uint32 { - if x != nil { - return x.ExitStatus - } - return 0 -} - -func (x *DeleteResponse) GetExitedAt() *timestamppb.Timestamp { - if x != nil { - return x.ExitedAt - } - return nil -} - -type DeleteProcessRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` -} - -func (x *DeleteProcessRequest) Reset() { - *x = DeleteProcessRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteProcessRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteProcessRequest) ProtoMessage() {} - -func (x *DeleteProcessRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteProcessRequest.ProtoReflect.Descriptor instead. -func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{6} -} - -func (x *DeleteProcessRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *DeleteProcessRequest) GetExecID() string { - if x != nil { - return x.ExecID - } - return "" -} - -type GetRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` -} - -func (x *GetRequest) Reset() { - *x = GetRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetRequest) ProtoMessage() {} - -func (x *GetRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. 
-func (*GetRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{7} -} - -func (x *GetRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *GetRequest) GetExecID() string { - if x != nil { - return x.ExecID - } - return "" -} - -type GetResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Process *task.Process `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` -} - -func (x *GetResponse) Reset() { - *x = GetResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetResponse) ProtoMessage() {} - -func (x *GetResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. -func (*GetResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{8} -} - -func (x *GetResponse) GetProcess() *task.Process { - if x != nil { - return x.Process - } - return nil -} - -type ListTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` -} - -func (x *ListTasksRequest) Reset() { - *x = ListTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTasksRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTasksRequest) ProtoMessage() {} - -func (x *ListTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTasksRequest.ProtoReflect.Descriptor instead. 
-func (*ListTasksRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{9} -} - -func (x *ListTasksRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -type ListTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tasks []*task.Process `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` -} - -func (x *ListTasksResponse) Reset() { - *x = ListTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTasksResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTasksResponse) ProtoMessage() {} - -func (x *ListTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTasksResponse.ProtoReflect.Descriptor instead. -func (*ListTasksResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{10} -} - -func (x *ListTasksResponse) GetTasks() []*task.Process { - if x != nil { - return x.Tasks - } - return nil -} - -type KillRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` - All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` -} - -func (x *KillRequest) Reset() { - *x = KillRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *KillRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*KillRequest) ProtoMessage() {} - -func (x *KillRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use KillRequest.ProtoReflect.Descriptor instead. 
-func (*KillRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{11} -} - -func (x *KillRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *KillRequest) GetExecID() string { - if x != nil { - return x.ExecID - } - return "" -} - -func (x *KillRequest) GetSignal() uint32 { - if x != nil { - return x.Signal - } - return 0 -} - -func (x *KillRequest) GetAll() bool { - if x != nil { - return x.All - } - return false -} - -type ExecProcessRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Stdin string `protobuf:"bytes,2,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,3,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,4,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,5,opt,name=terminal,proto3" json:"terminal,omitempty"` - // Spec for starting a process in the target container. - // - // For runc, this is a process spec, for example. - Spec *anypb.Any `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"` - // id of the exec process - ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` -} - -func (x *ExecProcessRequest) Reset() { - *x = ExecProcessRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExecProcessRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExecProcessRequest) ProtoMessage() {} - -func (x *ExecProcessRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExecProcessRequest.ProtoReflect.Descriptor instead. 
-func (*ExecProcessRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{12} -} - -func (x *ExecProcessRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *ExecProcessRequest) GetStdin() string { - if x != nil { - return x.Stdin - } - return "" -} - -func (x *ExecProcessRequest) GetStdout() string { - if x != nil { - return x.Stdout - } - return "" -} - -func (x *ExecProcessRequest) GetStderr() string { - if x != nil { - return x.Stderr - } - return "" -} - -func (x *ExecProcessRequest) GetTerminal() bool { - if x != nil { - return x.Terminal - } - return false -} - -func (x *ExecProcessRequest) GetSpec() *anypb.Any { - if x != nil { - return x.Spec - } - return nil -} - -func (x *ExecProcessRequest) GetExecID() string { - if x != nil { - return x.ExecID - } - return "" -} - -type ExecProcessResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ExecProcessResponse) Reset() { - *x = ExecProcessResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExecProcessResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExecProcessResponse) ProtoMessage() {} - -func (x *ExecProcessResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExecProcessResponse.ProtoReflect.Descriptor instead. -func (*ExecProcessResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{13} -} - -type ResizePtyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` - Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` -} - -func (x *ResizePtyRequest) Reset() { - *x = ResizePtyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResizePtyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResizePtyRequest) ProtoMessage() {} - -func (x *ResizePtyRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResizePtyRequest.ProtoReflect.Descriptor instead. 
-func (*ResizePtyRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{14} -} - -func (x *ResizePtyRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *ResizePtyRequest) GetExecID() string { - if x != nil { - return x.ExecID - } - return "" -} - -func (x *ResizePtyRequest) GetWidth() uint32 { - if x != nil { - return x.Width - } - return 0 -} - -func (x *ResizePtyRequest) GetHeight() uint32 { - if x != nil { - return x.Height - } - return 0 -} - -type CloseIORequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` -} - -func (x *CloseIORequest) Reset() { - *x = CloseIORequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CloseIORequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloseIORequest) ProtoMessage() {} - -func (x *CloseIORequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloseIORequest.ProtoReflect.Descriptor instead. -func (*CloseIORequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{15} -} - -func (x *CloseIORequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *CloseIORequest) GetExecID() string { - if x != nil { - return x.ExecID - } - return "" -} - -func (x *CloseIORequest) GetStdin() bool { - if x != nil { - return x.Stdin - } - return false -} - -type PauseTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` -} - -func (x *PauseTaskRequest) Reset() { - *x = PauseTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PauseTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PauseTaskRequest) ProtoMessage() {} - -func (x *PauseTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PauseTaskRequest.ProtoReflect.Descriptor instead. 
-func (*PauseTaskRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{16} -} - -func (x *PauseTaskRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -type ResumeTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` -} - -func (x *ResumeTaskRequest) Reset() { - *x = ResumeTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResumeTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResumeTaskRequest) ProtoMessage() {} - -func (x *ResumeTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResumeTaskRequest.ProtoReflect.Descriptor instead. -func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{17} -} - -func (x *ResumeTaskRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -type ListPidsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` -} - -func (x *ListPidsRequest) Reset() { - *x = ListPidsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListPidsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListPidsRequest) ProtoMessage() {} - -func (x *ListPidsRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListPidsRequest.ProtoReflect.Descriptor instead. 
-func (*ListPidsRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{18} -} - -func (x *ListPidsRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -type ListPidsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Processes includes the process ID and additional process information - Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` -} - -func (x *ListPidsResponse) Reset() { - *x = ListPidsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListPidsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListPidsResponse) ProtoMessage() {} - -func (x *ListPidsResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListPidsResponse.ProtoReflect.Descriptor instead. -func (*ListPidsResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{19} -} - -func (x *ListPidsResponse) GetProcesses() []*task.ProcessInfo { - if x != nil { - return x.Processes - } - return nil -} - -type CheckpointTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ParentCheckpoint string `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` - Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *CheckpointTaskRequest) Reset() { - *x = CheckpointTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CheckpointTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CheckpointTaskRequest) ProtoMessage() {} - -func (x *CheckpointTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CheckpointTaskRequest.ProtoReflect.Descriptor instead. 
-func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{20} -} - -func (x *CheckpointTaskRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *CheckpointTaskRequest) GetParentCheckpoint() string { - if x != nil { - return x.ParentCheckpoint - } - return "" -} - -func (x *CheckpointTaskRequest) GetOptions() *anypb.Any { - if x != nil { - return x.Options - } - return nil -} - -type CheckpointTaskResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Descriptors []*types.Descriptor `protobuf:"bytes,1,rep,name=descriptors,proto3" json:"descriptors,omitempty"` -} - -func (x *CheckpointTaskResponse) Reset() { - *x = CheckpointTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CheckpointTaskResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CheckpointTaskResponse) ProtoMessage() {} - -func (x *CheckpointTaskResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CheckpointTaskResponse.ProtoReflect.Descriptor instead. -func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{21} -} - -func (x *CheckpointTaskResponse) GetDescriptors() []*types.Descriptor { - if x != nil { - return x.Descriptors - } - return nil -} - -type UpdateTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Resources *anypb.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *UpdateTaskRequest) Reset() { - *x = UpdateTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateTaskRequest) ProtoMessage() {} - -func (x *UpdateTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateTaskRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{22} -} - -func (x *UpdateTaskRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *UpdateTaskRequest) GetResources() *anypb.Any { - if x != nil { - return x.Resources - } - return nil -} - -func (x *UpdateTaskRequest) GetAnnotations() map[string]string { - if x != nil { - return x.Annotations - } - return nil -} - -type MetricsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` -} - -func (x *MetricsRequest) Reset() { - *x = MetricsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetricsRequest) ProtoMessage() {} - -func (x *MetricsRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricsRequest.ProtoReflect.Descriptor instead. -func (*MetricsRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{23} -} - -func (x *MetricsRequest) GetFilters() []string { - if x != nil { - return x.Filters - } - return nil -} - -type MetricsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Metrics []*types.Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"` -} - -func (x *MetricsResponse) Reset() { - *x = MetricsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetricsResponse) ProtoMessage() {} - -func (x *MetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. 
-func (*MetricsResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{24} -} - -func (x *MetricsResponse) GetMetrics() []*types.Metric { - if x != nil { - return x.Metrics - } - return nil -} - -type WaitRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` -} - -func (x *WaitRequest) Reset() { - *x = WaitRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WaitRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WaitRequest) ProtoMessage() {} - -func (x *WaitRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WaitRequest.ProtoReflect.Descriptor instead. -func (*WaitRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{25} -} - -func (x *WaitRequest) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *WaitRequest) GetExecID() string { - if x != nil { - return x.ExecID - } - return "" -} - -type WaitResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` -} - -func (x *WaitResponse) Reset() { - *x = WaitResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WaitResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WaitResponse) ProtoMessage() {} - -func (x *WaitResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WaitResponse.ProtoReflect.Descriptor instead. 
-func (*WaitResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{26} -} - -func (x *WaitResponse) GetExitStatus() uint32 { - if x != nil { - return x.ExitStatus - } - return 0 -} - -func (x *WaitResponse) GetExitedAt() *timestamppb.Timestamp { - if x != nil { - return x.ExitedAt - } - return nil -} - -var File_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc = []byte{ - 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, - 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, - 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3b, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, - 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x02, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x2f, 0x0a, 0x06, 
0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x12, 0x3c, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, - 0x61, 0x74, 0x68, 0x22, 0x49, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, - 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x4a, - 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x36, 0x0a, - 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 
[... remainder of the deleted machine-generated tasks.pb.go elided: the rest of the raw descriptor byte array file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc, the rawDescGZIP helper, the msgTypes/goTypes/depIdxs registration tables, and the generated init() message exporters and protoimpl.TypeBuilder setup ...]
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto b/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
deleted file mode 100644
index 8ddd3192602..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-syntax = "proto3";
-
-package containerd.services.tasks.v1;
-
-import "google/protobuf/empty.proto";
-import "google/protobuf/any.proto";
-import "github.com/containerd/containerd/api/types/mount.proto";
-import "github.com/containerd/containerd/api/types/metrics.proto";
-import "github.com/containerd/containerd/api/types/descriptor.proto";
-import "github.com/containerd/containerd/api/types/task/task.proto";
-import "google/protobuf/timestamp.proto";
-
-option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";
-
-service Tasks {
-	// Create a task.
-	rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
-
-	// Start a process.
-	rpc Start(StartRequest) returns (StartResponse);
-
-	// Delete a task and on disk state.
-	rpc Delete(DeleteTaskRequest) returns (DeleteResponse);
-
-	rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
-
-	rpc Get(GetRequest) returns (GetResponse);
-
-	rpc List(ListTasksRequest) returns (ListTasksResponse);
-
-	// Kill a task or process.
-	rpc Kill(KillRequest) returns (google.protobuf.Empty);
-
-	rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);
-
-	rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
-
-	rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
-
-	rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);
-
-	rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);
-
-	rpc ListPids(ListPidsRequest) returns (ListPidsResponse);
-
-	rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);
-
-	rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);
-
-	rpc Metrics(MetricsRequest) returns (MetricsResponse);
-
-	rpc Wait(WaitRequest) returns (WaitResponse);
-}
-
-message CreateTaskRequest {
-	string container_id = 1;
-
-	// RootFS provides the pre-chroot mounts to perform in the shim before
-	// executing the container task.
-	//
-	// These are for mounts that cannot be performed in the user namespace.
-	// Typically, these mounts should be resolved from snapshots specified on
-	// the container object.
-	repeated containerd.types.Mount rootfs = 3;
-
-	string stdin = 4;
-	string stdout = 5;
-	string stderr = 6;
-	bool terminal = 7;
-
-	containerd.types.Descriptor checkpoint = 8;
-
-	google.protobuf.Any options = 9;
-
-	string runtime_path = 10;
-}
-
-message CreateTaskResponse {
-	string container_id = 1;
-	uint32 pid = 2;
-}
-
-message StartRequest {
-	string container_id = 1;
-	string exec_id = 2;
-}
-
-message StartResponse {
-	uint32 pid = 1;
-}
-
-message DeleteTaskRequest {
-	string container_id = 1;
-}
-
-message DeleteResponse {
-	string id = 1;
-	uint32 pid = 2;
-	uint32 exit_status = 3;
-	google.protobuf.Timestamp exited_at = 4;
-}
-
-message DeleteProcessRequest {
-	string container_id = 1;
-	string exec_id = 2;
-}
-
-message GetRequest {
-	string container_id = 1;
-	string exec_id = 2;
-}
-
-message GetResponse {
-	containerd.v1.types.Process process = 1;
-}
-
-message ListTasksRequest {
-	string filter = 1;
-}
-
-message ListTasksResponse {
-	repeated containerd.v1.types.Process tasks = 1;
-}
-
-message KillRequest {
-	string container_id = 1;
-	string exec_id = 2;
-	uint32 signal = 3;
-	bool all = 4;
-}
-
-message ExecProcessRequest {
-	string container_id = 1;
-	string stdin = 2;
-	string stdout = 3;
-	string stderr = 4;
-	bool terminal = 5;
-	// Spec for starting a process in the target container.
-	//
-	// For runc, this is a process spec, for example.
-	google.protobuf.Any spec = 6;
-	// id of the exec process
-	string exec_id = 7;
-}
-
-message ExecProcessResponse {
-}
-
-message ResizePtyRequest {
-	string container_id = 1;
-	string exec_id = 2;
-	uint32 width = 3;
-	uint32 height = 4;
-}
-
-message CloseIORequest {
-	string container_id = 1;
-	string exec_id = 2;
-	bool stdin = 3;
-}
-
-message PauseTaskRequest {
-	string container_id = 1;
-}
-
-message ResumeTaskRequest {
-	string container_id = 1;
-}
-
-message ListPidsRequest {
-	string container_id = 1;
-}
-
-message ListPidsResponse {
-	// Processes includes the process ID and additional process information
-	repeated containerd.v1.types.ProcessInfo processes = 1;
-}
-
-message CheckpointTaskRequest {
-	string container_id = 1;
-	string parent_checkpoint = 2;
-	google.protobuf.Any options = 3;
-}
-
-message CheckpointTaskResponse {
-	repeated containerd.types.Descriptor descriptors = 1;
-}
-
-message UpdateTaskRequest {
-	string container_id = 1;
-	google.protobuf.Any resources = 2;
-	map<string, string> annotations = 3;
-}
-
-message MetricsRequest {
-	repeated string filters = 1;
-}
-
-message MetricsResponse {
-	repeated types.Metric metrics = 1;
-}
-
-message WaitRequest {
-	string container_id = 1;
-	string exec_id = 2;
-}
-
-message WaitResponse {
-	uint32 exit_status = 1;
-	google.protobuf.Timestamp exited_at = 2;
-}
diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go
deleted file mode 100644
index 1bc23522abb..00000000000
--- a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go
+++ /dev/null
@@ -1,692 +0,0 @@
-//go:build !no_grpc
-
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc             v3.20.1
-// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
-
-package tasks
-
-import (
-	context "context"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-	emptypb "google.golang.org/protobuf/types/known/emptypb"
-)
[... generated gRPC plumbing of the deleted tasks_grpc.pb.go elided: the grpc.SupportPackageIsVersion7 compile-time assertion, the TasksClient interface and its seventeen tasksClient method wrappers, the TasksServer interface, the UnimplementedTasksServer stubs, UnsafeTasksServer, RegisterTasksServer, and the unary handlers _Tasks_Create_Handler through _Tasks_Metrics_Handler ...]
-func _Tasks_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(WaitRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(TasksServer).Wait(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod:
"/containerd.services.tasks.v1.Tasks/Wait", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Wait(ctx, req.(*WaitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Tasks_ServiceDesc is the grpc.ServiceDesc for Tasks service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Tasks_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.tasks.v1.Tasks", - HandlerType: (*TasksServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Create", - Handler: _Tasks_Create_Handler, - }, - { - MethodName: "Start", - Handler: _Tasks_Start_Handler, - }, - { - MethodName: "Delete", - Handler: _Tasks_Delete_Handler, - }, - { - MethodName: "DeleteProcess", - Handler: _Tasks_DeleteProcess_Handler, - }, - { - MethodName: "Get", - Handler: _Tasks_Get_Handler, - }, - { - MethodName: "List", - Handler: _Tasks_List_Handler, - }, - { - MethodName: "Kill", - Handler: _Tasks_Kill_Handler, - }, - { - MethodName: "Exec", - Handler: _Tasks_Exec_Handler, - }, - { - MethodName: "ResizePty", - Handler: _Tasks_ResizePty_Handler, - }, - { - MethodName: "CloseIO", - Handler: _Tasks_CloseIO_Handler, - }, - { - MethodName: "Pause", - Handler: _Tasks_Pause_Handler, - }, - { - MethodName: "Resume", - Handler: _Tasks_Resume_Handler, - }, - { - MethodName: "ListPids", - Handler: _Tasks_ListPids_Handler, - }, - { - MethodName: "Checkpoint", - Handler: _Tasks_Checkpoint_Handler, - }, - { - MethodName: "Update", - Handler: _Tasks_Update_Handler, - }, - { - MethodName: "Metrics", - Handler: _Tasks_Metrics_Handler, - }, - { - MethodName: "Wait", - Handler: _Tasks_Wait_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_ttrpc.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_ttrpc.pb.go deleted file mode 100644 index 859eec58e0b..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_ttrpc.pb.go +++ /dev/null @@ -1,301 +0,0 @@ -// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. 
-// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto -package tasks - -import ( - context "context" - ttrpc "github.com/containerd/ttrpc" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -type TTRPCTasksService interface { - Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) - Start(context.Context, *StartRequest) (*StartResponse, error) - Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) - DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) - Get(context.Context, *GetRequest) (*GetResponse, error) - List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) - Kill(context.Context, *KillRequest) (*emptypb.Empty, error) - Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error) - ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error) - CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error) - Pause(context.Context, *PauseTaskRequest) (*emptypb.Empty, error) - Resume(context.Context, *ResumeTaskRequest) (*emptypb.Empty, error) - ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) - Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) - Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error) - Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) - Wait(context.Context, *WaitRequest) (*WaitResponse, error) -} - -func RegisterTTRPCTasksService(srv *ttrpc.Server, svc TTRPCTasksService) { - srv.RegisterService("containerd.services.tasks.v1.Tasks", &ttrpc.ServiceDesc{ - Methods: map[string]ttrpc.Method{ - "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CreateTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Create(ctx, &req) - }, - "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req StartRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Start(ctx, &req) - }, - "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req DeleteTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Delete(ctx, &req) - }, - "DeleteProcess": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req DeleteProcessRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.DeleteProcess(ctx, &req) - }, - "Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req GetRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Get(ctx, &req) - }, - "List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ListTasksRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.List(ctx, &req) - }, - "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req KillRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Kill(ctx, &req) - }, - "Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ExecProcessRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Exec(ctx, &req) - }, - "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ResizePtyRequest - if 
err := unmarshal(&req); err != nil { - return nil, err - } - return svc.ResizePty(ctx, &req) - }, - "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CloseIORequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.CloseIO(ctx, &req) - }, - "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req PauseTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Pause(ctx, &req) - }, - "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ResumeTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Resume(ctx, &req) - }, - "ListPids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ListPidsRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.ListPids(ctx, &req) - }, - "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CheckpointTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Checkpoint(ctx, &req) - }, - "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req UpdateTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Update(ctx, &req) - }, - "Metrics": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req MetricsRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Metrics(ctx, &req) - }, - "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req WaitRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Wait(ctx, &req) - }, - }, - }) -} - -type ttrpctasksClient struct { - client *ttrpc.Client -} - -func NewTTRPCTasksClient(client *ttrpc.Client) TTRPCTasksService { - return &ttrpctasksClient{ - client: client, - } -} - -func (c *ttrpctasksClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { - var resp CreateTaskResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Create", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { - var resp StartResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Start", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Delete(ctx context.Context, req *DeleteTaskRequest) (*DeleteResponse, error) { - var resp DeleteResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Delete", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) { - var resp DeleteResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "DeleteProcess", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - var resp GetResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Get", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) List(ctx 
context.Context, req *ListTasksRequest) (*ListTasksResponse, error) { - var resp ListTasksResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "List", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Kill(ctx context.Context, req *KillRequest) (*emptypb.Empty, error) { - var resp emptypb.Empty - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Kill", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Exec(ctx context.Context, req *ExecProcessRequest) (*emptypb.Empty, error) { - var resp emptypb.Empty - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Exec", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*emptypb.Empty, error) { - var resp emptypb.Empty - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "ResizePty", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) CloseIO(ctx context.Context, req *CloseIORequest) (*emptypb.Empty, error) { - var resp emptypb.Empty - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "CloseIO", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Pause(ctx context.Context, req *PauseTaskRequest) (*emptypb.Empty, error) { - var resp emptypb.Empty - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Pause", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Resume(ctx context.Context, req *ResumeTaskRequest) (*emptypb.Empty, error) { - var resp emptypb.Empty - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Resume", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) { - var resp ListPidsResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "ListPids", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*CheckpointTaskResponse, error) { - var resp CheckpointTaskResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Checkpoint", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Update(ctx context.Context, req *UpdateTaskRequest) (*emptypb.Empty, error) { - var resp emptypb.Empty - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Update", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Metrics(ctx context.Context, req *MetricsRequest) (*MetricsResponse, error) { - var resp MetricsResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Metrics", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *ttrpctasksClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { - var resp WaitResponse - if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Wait", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/doc.go b/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/doc.go deleted file 
mode 100644 index c5c0b85ddb2..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package version defines the version service. -package version diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go deleted file mode 100644 index c087d3e26be..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go +++ /dev/null @@ -1,187 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/services/version/v1/version.proto - -package version - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type VersionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` -} - -func (x *VersionResponse) Reset() { - *x = VersionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VersionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VersionResponse) ProtoMessage() {} - -func (x *VersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. -func (*VersionResponse) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescGZIP(), []int{0} -} - -func (x *VersionResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *VersionResponse) GetRevision() string { - if x != nil { - return x.Revision - } - return "" -} - -var File_github_com_containerd_containerd_api_services_version_v1_version_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc = []byte{ - 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x5d, - 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2f, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, - 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData = file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes = []interface{}{ - (*VersionResponse)(nil), // 0: containerd.services.version.v1.VersionResponse - (*emptypb.Empty)(nil), // 1: google.protobuf.Empty -} -var file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs = []int32{ - 1, // 0: containerd.services.version.v1.Version.Version:input_type -> google.protobuf.Empty - 0, // 1: containerd.services.version.v1.Version.Version:output_type -> containerd.services.version.v1.VersionResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_services_version_v1_version_proto_init() } -func file_github_com_containerd_containerd_api_services_version_v1_version_proto_init() { - if File_github_com_containerd_containerd_api_services_version_v1_version_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs, - MessageInfos: 
file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_services_version_v1_version_proto = out.File - file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc = nil - file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes = nil - file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto b/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto deleted file mode 100644 index bd948ff343b..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.services.version.v1; - -import "google/protobuf/empty.proto"; - -// TODO(stevvooe): Should version service actually be versioned? -option go_package = "github.com/containerd/containerd/api/services/version/v1;version"; - -service Version { - rpc Version(google.protobuf.Empty) returns (VersionResponse); -} - -message VersionResponse { - string version = 1; - string revision = 2; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go deleted file mode 100644 index e96eddefb79..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go +++ /dev/null @@ -1,108 +0,0 @@ -//go:build !no_grpc - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.20.1 -// source: github.com/containerd/containerd/api/services/version/v1/version.proto - -package version - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// VersionClient is the client API for Version service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type VersionClient interface { - Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) -} - -type versionClient struct { - cc grpc.ClientConnInterface -} - -func NewVersionClient(cc grpc.ClientConnInterface) VersionClient { - return &versionClient{cc} -} - -func (c *versionClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { - out := new(VersionResponse) - err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// VersionServer is the server API for Version service. -// All implementations must embed UnimplementedVersionServer -// for forward compatibility -type VersionServer interface { - Version(context.Context, *emptypb.Empty) (*VersionResponse, error) - mustEmbedUnimplementedVersionServer() -} - -// UnimplementedVersionServer must be embedded to have forward compatible implementations. -type UnimplementedVersionServer struct { -} - -func (UnimplementedVersionServer) Version(context.Context, *emptypb.Empty) (*VersionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") -} -func (UnimplementedVersionServer) mustEmbedUnimplementedVersionServer() {} - -// UnsafeVersionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to VersionServer will -// result in compilation errors. -type UnsafeVersionServer interface { - mustEmbedUnimplementedVersionServer() -} - -func RegisterVersionServer(s grpc.ServiceRegistrar, srv VersionServer) { - s.RegisterService(&Version_ServiceDesc, srv) -} - -func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VersionServer).Version(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.version.v1.Version/Version", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VersionServer).Version(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// Version_ServiceDesc is the grpc.ServiceDesc for Version service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Version_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.version.v1.Version", - HandlerType: (*VersionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Version", - Handler: _Version_Version_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto", -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version_ttrpc.pb.go b/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version_ttrpc.pb.go deleted file mode 100644 index c284f14e72d..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version_ttrpc.pb.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. 
-// source: github.com/containerd/containerd/api/services/version/v1/version.proto -package version - -import ( - context "context" - ttrpc "github.com/containerd/ttrpc" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -type TTRPCVersionService interface { - Version(context.Context, *emptypb.Empty) (*VersionResponse, error) -} - -func RegisterTTRPCVersionService(srv *ttrpc.Server, svc TTRPCVersionService) { - srv.RegisterService("containerd.services.version.v1.Version", &ttrpc.ServiceDesc{ - Methods: map[string]ttrpc.Method{ - "Version": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req emptypb.Empty - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Version(ctx, &req) - }, - }, - }) -} - -type ttrpcversionClient struct { - client *ttrpc.Client -} - -func NewTTRPCVersionClient(client *ttrpc.Client) TTRPCVersionService { - return &ttrpcversionClient{ - client: client, - } -} - -func (c *ttrpcversionClient) Version(ctx context.Context, req *emptypb.Empty) (*VersionResponse, error) { - var resp VersionResponse - if err := c.client.Call(ctx, "containerd.services.version.v1.Version", "Version", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go deleted file mode 100644 index f3db1c52d96..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go +++ /dev/null @@ -1,206 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/descriptor.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Descriptor describes a blob in a content store. -// -// This descriptor can be used to reference content from an -// oci descriptor found in a manifest. 
-// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor -type Descriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` - Digest string `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` - Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Descriptor) Reset() { - *x = Descriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Descriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Descriptor) ProtoMessage() {} - -func (x *Descriptor) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Descriptor.ProtoReflect.Descriptor instead. -func (*Descriptor) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescGZIP(), []int{0} -} - -func (x *Descriptor) GetMediaType() string { - if x != nil { - return x.MediaType - } - return "" -} - -func (x *Descriptor) GetDigest() string { - if x != nil { - return x.Digest - } - return "" -} - -func (x *Descriptor) GetSize() int64 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *Descriptor) GetAnnotations() map[string]string { - if x != nil { - return x.Annotations - } - return nil -} - -var File_github_com_containerd_containerd_api_types_descriptor_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, - 0xe8, 0x01, 0x0a, 0x0a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x1d, - 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x44, 0x65, 
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData = file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes = []interface{}{ - (*Descriptor)(nil), // 0: containerd.types.Descriptor - nil, // 1: containerd.types.Descriptor.AnnotationsEntry -} -var file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs = []int32{ - 1, // 0: containerd.types.Descriptor.annotations:type_name -> containerd.types.Descriptor.AnnotationsEntry - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_descriptor_proto_init() } -func file_github_com_containerd_containerd_api_types_descriptor_proto_init() { - if File_github_com_containerd_containerd_api_types_descriptor_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Descriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes, - DependencyIndexes: 
file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs, - MessageInfos: file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_descriptor_proto = out.File - file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/e2e/vendor/github.com/containerd/containerd/api/types/descriptor.proto deleted file mode 100644 index faaf416dd10..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/descriptor.proto +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.types; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -// Descriptor describes a blob in a content store. -// -// This descriptor can be used to reference content from an -// oci descriptor found in a manifest. -// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor -message Descriptor { - string media_type = 1; - string digest = 2; - int64 size = 3; - map annotations = 5; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/doc.go b/e2e/vendor/github.com/containerd/containerd/api/types/doc.go deleted file mode 100644 index 475b465ed44..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package types diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/event.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/event.pb.go deleted file mode 100644 index 6ebe1e26ddb..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/event.pb.go +++ /dev/null @@ -1,209 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. 
-//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/event.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Envelope struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` - Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` -} - -func (x *Envelope) Reset() { - *x = Envelope{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Envelope) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Envelope) ProtoMessage() {} - -func (x *Envelope) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. 
-func (*Envelope) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_event_proto_rawDescGZIP(), []int{0} -} - -func (x *Envelope) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *Envelope) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *Envelope) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *Envelope) GetEvent() *anypb.Any { - if x != nil { - return x.Event - } - return nil -} - -var File_github_com_containerd_containerd_api_types_event_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_event_proto_rawDesc = []byte{ - 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, - 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a, - 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01, 0x42, - 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_event_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_types_event_proto_rawDescData = 
file_github_com_containerd_containerd_api_types_event_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_event_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_event_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_event_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_event_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_event_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_github_com_containerd_containerd_api_types_event_proto_goTypes = []interface{}{ - (*Envelope)(nil), // 0: containerd.types.Envelope - (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp - (*anypb.Any)(nil), // 2: google.protobuf.Any -} -var file_github_com_containerd_containerd_api_types_event_proto_depIdxs = []int32{ - 1, // 0: containerd.types.Envelope.timestamp:type_name -> google.protobuf.Timestamp - 2, // 1: containerd.types.Envelope.event:type_name -> google.protobuf.Any - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_event_proto_init() } -func file_github_com_containerd_containerd_api_types_event_proto_init() { - if File_github_com_containerd_containerd_api_types_event_proto != nil { - return - } - file_github_com_containerd_containerd_api_types_fieldpath_proto_init() - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Envelope); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_event_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_event_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_event_proto_depIdxs, - MessageInfos: file_github_com_containerd_containerd_api_types_event_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_event_proto = out.File - file_github_com_containerd_containerd_api_types_event_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_event_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_event_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/event.proto b/e2e/vendor/github.com/containerd/containerd/api/types/event.proto deleted file mode 100644 index a73bc9d4507..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/event.proto +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.types; - -import "github.com/containerd/containerd/api/types/fieldpath.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -message Envelope { - option (containerd.types.fieldpath) = true; - google.protobuf.Timestamp timestamp = 1; - string namespace = 2; - string topic = 3; - google.protobuf.Any event = 4; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/fieldpath.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/fieldpath.pb.go deleted file mode 100644 index 0f8feb415b4..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/fieldpath.pb.go +++ /dev/null @@ -1,144 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/fieldpath.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -var file_github_com_containerd_containerd_api_types_fieldpath_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63300, - Name: "containerd.types.fieldpath_all", - Tag: "varint,63300,opt,name=fieldpath_all", - Filename: "github.com/containerd/containerd/api/types/fieldpath.proto", - }, - { - ExtendedType: (*descriptorpb.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64400, - Name: "containerd.types.fieldpath", - Tag: "varint,64400,opt,name=fieldpath", - Filename: "github.com/containerd/containerd/api/types/fieldpath.proto", - }, -} - -// Extension fields to descriptorpb.FileOptions. -var ( - // optional bool fieldpath_all = 63300; - E_FieldpathAll = &file_github_com_containerd_containerd_api_types_fieldpath_proto_extTypes[0] -) - -// Extension fields to descriptorpb.MessageOptions. -var ( - // optional bool fieldpath = 64400; - E_Fieldpath = &file_github_com_containerd_containerd_api_types_fieldpath_proto_extTypes[1] -) - -var File_github_com_containerd_containerd_api_types_fieldpath_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_fieldpath_proto_rawDesc = []byte{ - 0x0a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x20, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x3a, 0x46, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x6c, - 0x6c, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xc4, 0xee, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, - 0x74, 0x68, 0x41, 0x6c, 0x6c, 0x88, 0x01, 0x01, 0x3a, 0x42, 0x0a, 0x09, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x90, 0xf7, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x88, 0x01, 0x01, 0x42, 0x32, 0x5a, 0x30, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_containerd_containerd_api_types_fieldpath_proto_goTypes = []interface{}{ - (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions - (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions -} -var file_github_com_containerd_containerd_api_types_fieldpath_proto_depIdxs = []int32{ - 0, // 0: 
containerd.types.fieldpath_all:extendee -> google.protobuf.FileOptions - 1, // 1: containerd.types.fieldpath:extendee -> google.protobuf.MessageOptions - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 0, // [0:2] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_fieldpath_proto_init() } -func file_github_com_containerd_containerd_api_types_fieldpath_proto_init() { - if File_github_com_containerd_containerd_api_types_fieldpath_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_fieldpath_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 2, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_fieldpath_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_fieldpath_proto_depIdxs, - ExtensionInfos: file_github_com_containerd_containerd_api_types_fieldpath_proto_extTypes, - }.Build() - File_github_com_containerd_containerd_api_types_fieldpath_proto = out.File - file_github_com_containerd_containerd_api_types_fieldpath_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_fieldpath_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_fieldpath_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/fieldpath.proto b/e2e/vendor/github.com/containerd/containerd/api/types/fieldpath.proto deleted file mode 100644 index 8b290842b01..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/fieldpath.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; -package containerd.types; - -import "google/protobuf/descriptor.proto"; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -extend google.protobuf.FileOptions { - optional bool fieldpath_all = 63300; -} - -extend google.protobuf.MessageOptions { - optional bool fieldpath = 64400; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/introspection.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/introspection.pb.go deleted file mode 100644 index 2f9c2ac4495..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/introspection.pb.go +++ /dev/null @@ -1,375 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/introspection.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type RuntimeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RuntimePath string `protobuf:"bytes,1,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"` - // Options correspond to CreateTaskRequest.options. - // This is needed to pass the runc binary path, etc. - Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *RuntimeRequest) Reset() { - *x = RuntimeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RuntimeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RuntimeRequest) ProtoMessage() {} - -func (x *RuntimeRequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RuntimeRequest.ProtoReflect.Descriptor instead. 
-func (*RuntimeRequest) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{0} -} - -func (x *RuntimeRequest) GetRuntimePath() string { - if x != nil { - return x.RuntimePath - } - return "" -} - -func (x *RuntimeRequest) GetOptions() *anypb.Any { - if x != nil { - return x.Options - } - return nil -} - -type RuntimeVersion struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` -} - -func (x *RuntimeVersion) Reset() { - *x = RuntimeVersion{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RuntimeVersion) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RuntimeVersion) ProtoMessage() {} - -func (x *RuntimeVersion) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RuntimeVersion.ProtoReflect.Descriptor instead. -func (*RuntimeVersion) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{1} -} - -func (x *RuntimeVersion) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *RuntimeVersion) GetRevision() string { - if x != nil { - return x.Revision - } - return "" -} - -type RuntimeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version *RuntimeVersion `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.) - Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md - Features *anypb.Any `protobuf:"bytes,4,opt,name=features,proto3" json:"features,omitempty"` - // Annotations of the shim. Irrelevant to features.Annotations. 
- Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *RuntimeInfo) Reset() { - *x = RuntimeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RuntimeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RuntimeInfo) ProtoMessage() {} - -func (x *RuntimeInfo) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RuntimeInfo.ProtoReflect.Descriptor instead. -func (*RuntimeInfo) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{2} -} - -func (x *RuntimeInfo) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *RuntimeInfo) GetVersion() *RuntimeVersion { - if x != nil { - return x.Version - } - return nil -} - -func (x *RuntimeInfo) GetOptions() *anypb.Any { - if x != nil { - return x.Options - } - return nil -} - -func (x *RuntimeInfo) GetFeatures() *anypb.Any { - if x != nil { - return x.Features - } - return nil -} - -func (x *RuntimeInfo) GetAnnotations() map[string]string { - if x != nil { - return x.Annotations - } - return nil -} - -var File_github_com_containerd_containerd_api_types_introspection_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc = []byte{ - 0x0a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74, - 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, - 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x61, - 0x74, 0x68, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x46, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, - 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x52, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3a, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, - 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, - 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_introspection_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData = file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_introspection_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_github_com_containerd_containerd_api_types_introspection_proto_goTypes = []interface{}{ - (*RuntimeRequest)(nil), // 0: 
containerd.types.RuntimeRequest - (*RuntimeVersion)(nil), // 1: containerd.types.RuntimeVersion - (*RuntimeInfo)(nil), // 2: containerd.types.RuntimeInfo - nil, // 3: containerd.types.RuntimeInfo.AnnotationsEntry - (*anypb.Any)(nil), // 4: google.protobuf.Any -} -var file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs = []int32{ - 4, // 0: containerd.types.RuntimeRequest.options:type_name -> google.protobuf.Any - 1, // 1: containerd.types.RuntimeInfo.version:type_name -> containerd.types.RuntimeVersion - 4, // 2: containerd.types.RuntimeInfo.options:type_name -> google.protobuf.Any - 4, // 3: containerd.types.RuntimeInfo.features:type_name -> google.protobuf.Any - 3, // 4: containerd.types.RuntimeInfo.annotations:type_name -> containerd.types.RuntimeInfo.AnnotationsEntry - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_introspection_proto_init() } -func file_github_com_containerd_containerd_api_types_introspection_proto_init() { - if File_github_com_containerd_containerd_api_types_introspection_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeVersion); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_introspection_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs, - MessageInfos: file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_introspection_proto = out.File - file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_introspection_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/introspection.proto b/e2e/vendor/github.com/containerd/containerd/api/types/introspection.proto deleted file mode 100644 index 8f3fcb5a482..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/introspection.proto +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 
The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.types; - -import "google/protobuf/any.proto"; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -message RuntimeRequest { - string runtime_path = 1; - // Options correspond to CreateTaskRequest.options. - // This is needed to pass the runc binary path, etc. - google.protobuf.Any options = 2; -} - -message RuntimeVersion { - string version = 1; - string revision = 2; -} - -message RuntimeInfo { - string name = 1; - RuntimeVersion version = 2; - // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.) - google.protobuf.Any options = 3; - // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md - google.protobuf.Any features = 4; - // Annotations of the shim. Irrelevant to features.Annotations. - map<string, string> annotations = 5; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/metrics.pb.go deleted file mode 100644 index b18ce1c5b60..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/metrics.pb.go +++ /dev/null @@ -1,194 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/metrics.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Metric struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Data *anypb.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *Metric) Reset() { - *x = Metric{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metric) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metric) ProtoMessage() {} - -func (x *Metric) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metric.ProtoReflect.Descriptor instead. -func (*Metric) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_metrics_proto_rawDescGZIP(), []int{0} -} - -func (x *Metric) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *Metric) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *Metric) GetData() *anypb.Any { - if x != nil { - return x.Data - } - return nil -} - -var File_github_com_containerd_containerd_api_types_metrics_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc = []byte{ - 0x0a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x28, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 
0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_metrics_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData = file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_metrics_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_metrics_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_github_com_containerd_containerd_api_types_metrics_proto_goTypes = []interface{}{ - (*Metric)(nil), // 0: containerd.types.Metric - (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp - (*anypb.Any)(nil), // 2: google.protobuf.Any -} -var file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs = []int32{ - 1, // 0: containerd.types.Metric.timestamp:type_name -> google.protobuf.Timestamp - 2, // 1: containerd.types.Metric.data:type_name -> google.protobuf.Any - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_metrics_proto_init() } -func file_github_com_containerd_containerd_api_types_metrics_proto_init() { - if File_github_com_containerd_containerd_api_types_metrics_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metric); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_metrics_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs, - MessageInfos: file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_metrics_proto = out.File - file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_metrics_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/metrics.proto b/e2e/vendor/github.com/containerd/containerd/api/types/metrics.proto deleted file mode 100644 index 3e6a7751e37..00000000000 --- 
a/e2e/vendor/github.com/containerd/containerd/api/types/metrics.proto +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.types; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -message Metric { - google.protobuf.Timestamp timestamp = 1; - string id = 2; - google.protobuf.Any data = 3; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/mount.pb.go deleted file mode 100644 index ff77a7d7bd2..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/mount.pb.go +++ /dev/null @@ -1,202 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/mount.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Mount describes mounts for a container. -// -// This type is the lingua franca of ContainerD. All services provide mounts -// to be used with the container at creation time. -// -// The Mount type follows the structure of the mount syscall, including a type, -// source, target and options. -type Mount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Type defines the nature of the mount. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` - // Target path in container - Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` - // Options specifies zero or more fstab style mount options. 
- Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` -} - -func (x *Mount) Reset() { - *x = Mount{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Mount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Mount) ProtoMessage() {} - -func (x *Mount) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Mount.ProtoReflect.Descriptor instead. -func (*Mount) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_mount_proto_rawDescGZIP(), []int{0} -} - -func (x *Mount) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Mount) GetSource() string { - if x != nil { - return x.Source - } - return "" -} - -func (x *Mount) GetTarget() string { - if x != nil { - return x.Target - } - return "" -} - -func (x *Mount) GetOptions() []string { - if x != nil { - return x.Options - } - return nil -} - -var File_github_com_containerd_containerd_api_types_mount_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_mount_proto_rawDesc = []byte{ - 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x65, 0x0a, 0x05, 0x4d, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_mount_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_types_mount_proto_rawDescData = file_github_com_containerd_containerd_api_types_mount_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_mount_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_mount_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_mount_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_mount_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_mount_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_github_com_containerd_containerd_api_types_mount_proto_goTypes = []interface{}{ - (*Mount)(nil), // 0: containerd.types.Mount -} -var file_github_com_containerd_containerd_api_types_mount_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_mount_proto_init() } -func file_github_com_containerd_containerd_api_types_mount_proto_init() { - if File_github_com_containerd_containerd_api_types_mount_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Mount); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_mount_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_mount_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_mount_proto_depIdxs, - MessageInfos: file_github_com_containerd_containerd_api_types_mount_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_mount_proto = out.File - file_github_com_containerd_containerd_api_types_mount_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_mount_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_mount_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/mount.proto b/e2e/vendor/github.com/containerd/containerd/api/types/mount.proto deleted file mode 100644 index 54e0a0cddfa..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/mount.proto +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.types; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -// Mount describes mounts for a container. -// -// This type is the lingua franca of ContainerD. All services provide mounts -// to be used with the container at creation time. -// -// The Mount type follows the structure of the mount syscall, including a type, -// source, target and options. 
-message Mount { - // Type defines the nature of the mount. - string type = 1; - - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - string source = 2; - - // Target path in container - string target = 3; - - // Options specifies zero or more fstab style mount options. - repeated string options = 4; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/platform.pb.go deleted file mode 100644 index daa62b834e1..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/platform.pb.go +++ /dev/null @@ -1,194 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/platform.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Platform follows the structure of the OCI platform specification, from -// descriptors. -type Platform struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` - Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` - Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` - OSVersion string `protobuf:"bytes,4,opt,name=os_version,json=osVersion,proto3" json:"os_version,omitempty"` -} - -func (x *Platform) Reset() { - *x = Platform{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Platform) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Platform) ProtoMessage() {} - -func (x *Platform) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Platform.ProtoReflect.Descriptor instead. 
-func (*Platform) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_platform_proto_rawDescGZIP(), []int{0} -} - -func (x *Platform) GetOS() string { - if x != nil { - return x.OS - } - return "" -} - -func (x *Platform) GetArchitecture() string { - if x != nil { - return x.Architecture - } - return "" -} - -func (x *Platform) GetVariant() string { - if x != nil { - return x.Variant - } - return "" -} - -func (x *Platform) GetOsVersion() string { - if x != nil { - return x.OSVersion - } - return "" -} - -var File_github_com_containerd_containerd_api_types_platform_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, - 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x77, 0x0a, - 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, - 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x73, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x73, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_platform_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_types_platform_proto_rawDescData = file_github_com_containerd_containerd_api_types_platform_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_platform_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_platform_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_platform_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_platform_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_platform_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_platform_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_github_com_containerd_containerd_api_types_platform_proto_goTypes = []interface{}{ - (*Platform)(nil), // 0: containerd.types.Platform -} -var file_github_com_containerd_containerd_api_types_platform_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the 
sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_platform_proto_init() } -func file_github_com_containerd_containerd_api_types_platform_proto_init() { - if File_github_com_containerd_containerd_api_types_platform_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Platform); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_platform_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_platform_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_platform_proto_depIdxs, - MessageInfos: file_github_com_containerd_containerd_api_types_platform_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_platform_proto = out.File - file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_platform_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_platform_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/platform.proto b/e2e/vendor/github.com/containerd/containerd/api/types/platform.proto deleted file mode 100644 index 0b9180016de..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/platform.proto +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.types; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -// Platform follows the structure of the OCI platform specification, from -// descriptors. -message Platform { - string os = 1; - string architecture = 2; - string variant = 3; - string os_version = 4; -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/platform_helpers.go b/e2e/vendor/github.com/containerd/containerd/api/types/platform_helpers.go deleted file mode 100644 index d8c1a687705..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/platform_helpers.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package types - -import oci "github.com/opencontainers/image-spec/specs-go/v1" - -// OCIPlatformToProto converts from a slice of OCI [specs.Platform] to a -// slice of the protobuf definition [Platform]. -func OCIPlatformToProto(platforms []oci.Platform) []*Platform { - ap := make([]*Platform, len(platforms)) - for i := range platforms { - ap[i] = &Platform{ - OS: platforms[i].OS, - OSVersion: platforms[i].OSVersion, - Architecture: platforms[i].Architecture, - Variant: platforms[i].Variant, - } - } - return ap -} - -// OCIPlatformFromProto converts a slice of the protobuf definition [Platform] -// to a slice of OCI [specs.Platform]. -func OCIPlatformFromProto(platforms []*Platform) []oci.Platform { - op := make([]oci.Platform, len(platforms)) - for i := range platforms { - op[i] = oci.Platform{ - OS: platforms[i].OS, - OSVersion: platforms[i].OSVersion, - Architecture: platforms[i].Architecture, - Variant: platforms[i].Variant, - } - } - return op -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go deleted file mode 100644 index 77888bf3320..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go +++ /dev/null @@ -1,357 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/sandbox.proto - -package types - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Sandbox represents a sandbox metadata object that keeps all info required by controller to -// work with a particular instance. -type Sandbox struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // SandboxID is a unique instance identifier within namespace - SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - // Runtime specifies which runtime to use for executing this container. 
- Runtime *Sandbox_Runtime `protobuf:"bytes,2,opt,name=runtime,proto3" json:"runtime,omitempty"` - // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the - // bundle directory (similary to OCI spec). - Spec *anypb.Any `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` - // Labels provides an area to include arbitrary data on containers. - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // CreatedAt is the time the container was first created. - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - // UpdatedAt is the last time the container was mutated. - UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` - // Extensions allow clients to provide optional blobs that can be handled by runtime. - Extensions map[string]*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Sandboxer is the name of the sandbox controller who manages the sandbox. - Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"` -} - -func (x *Sandbox) Reset() { - *x = Sandbox{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Sandbox) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Sandbox) ProtoMessage() {} - -func (x *Sandbox) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Sandbox.ProtoReflect.Descriptor instead. -func (*Sandbox) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP(), []int{0} -} - -func (x *Sandbox) GetSandboxID() string { - if x != nil { - return x.SandboxID - } - return "" -} - -func (x *Sandbox) GetRuntime() *Sandbox_Runtime { - if x != nil { - return x.Runtime - } - return nil -} - -func (x *Sandbox) GetSpec() *anypb.Any { - if x != nil { - return x.Spec - } - return nil -} - -func (x *Sandbox) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Sandbox) GetCreatedAt() *timestamppb.Timestamp { - if x != nil { - return x.CreatedAt - } - return nil -} - -func (x *Sandbox) GetUpdatedAt() *timestamppb.Timestamp { - if x != nil { - return x.UpdatedAt - } - return nil -} - -func (x *Sandbox) GetExtensions() map[string]*anypb.Any { - if x != nil { - return x.Extensions - } - return nil -} - -func (x *Sandbox) GetSandboxer() string { - if x != nil { - return x.Sandboxer - } - return "" -} - -type Sandbox_Runtime struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Name is the name of the runtime. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). - // Typically this data expected to be runtime shim implementation specific. - Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *Sandbox_Runtime) Reset() { - *x = Sandbox_Runtime{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Sandbox_Runtime) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Sandbox_Runtime) ProtoMessage() {} - -func (x *Sandbox_Runtime) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Sandbox_Runtime.ProtoReflect.Descriptor instead. -func (*Sandbox_Runtime) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Sandbox_Runtime) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Sandbox_Runtime) GetOptions() *anypb.Any { - if x != nil { - return x.Options - } - return nil -} - -var File_github_com_containerd_containerd_api_types_sandbox_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = []byte{ - 0x0a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x61, 0x6e, - 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8c, 0x05, 0x0a, 0x07, 0x53, 0x61, 0x6e, - 0x64, 0x62, 0x6f, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, - 0x78, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, - 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x12, 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, - 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x49, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61, - 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, - 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, - 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescOnce sync.Once - 
file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes = []interface{}{ - (*Sandbox)(nil), // 0: containerd.types.Sandbox - (*Sandbox_Runtime)(nil), // 1: containerd.types.Sandbox.Runtime - nil, // 2: containerd.types.Sandbox.LabelsEntry - nil, // 3: containerd.types.Sandbox.ExtensionsEntry - (*anypb.Any)(nil), // 4: google.protobuf.Any - (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp -} -var file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs = []int32{ - 1, // 0: containerd.types.Sandbox.runtime:type_name -> containerd.types.Sandbox.Runtime - 4, // 1: containerd.types.Sandbox.spec:type_name -> google.protobuf.Any - 2, // 2: containerd.types.Sandbox.labels:type_name -> containerd.types.Sandbox.LabelsEntry - 5, // 3: containerd.types.Sandbox.created_at:type_name -> google.protobuf.Timestamp - 5, // 4: containerd.types.Sandbox.updated_at:type_name -> google.protobuf.Timestamp - 3, // 5: containerd.types.Sandbox.extensions:type_name -> containerd.types.Sandbox.ExtensionsEntry - 4, // 6: containerd.types.Sandbox.Runtime.options:type_name -> google.protobuf.Any - 4, // 7: containerd.types.Sandbox.ExtensionsEntry.value:type_name -> google.protobuf.Any - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_sandbox_proto_init() } -func file_github_com_containerd_containerd_api_types_sandbox_proto_init() { - if File_github_com_containerd_containerd_api_types_sandbox_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Sandbox); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Sandbox_Runtime); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs, - MessageInfos: 
file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_sandbox_proto = out.File - file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/sandbox.proto b/e2e/vendor/github.com/containerd/containerd/api/types/sandbox.proto deleted file mode 100644 index b0bf233b954..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/sandbox.proto +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -syntax = "proto3"; - -package containerd.types; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "github.com/containerd/containerd/api/types;types"; - -// Sandbox represents a sandbox metadata object that keeps all info required by controller to -// work with a particular instance. -message Sandbox { - // SandboxID is a unique instance identifier within namespace - string sandbox_id = 1; - message Runtime { - // Name is the name of the runtime. - string name = 1; - // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). - // Typically this data expected to be runtime shim implementation specific. - google.protobuf.Any options = 2; - } - // Runtime specifies which runtime to use for executing this container. - Runtime runtime = 2; - // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the - // bundle directory (similary to OCI spec). - google.protobuf.Any spec = 3; - // Labels provides an area to include arbitrary data on containers. - map<string, string> labels = 4; - // CreatedAt is the time the container was first created. - google.protobuf.Timestamp created_at = 5; - // UpdatedAt is the last time the container was mutated. - google.protobuf.Timestamp updated_at = 6; - // Extensions allow clients to provide optional blobs that can be handled by runtime. - map<string, google.protobuf.Any> extensions = 7; - // Sandboxer is the name of the sandbox controller who manages the sandbox. - string sandboxer = 10; - -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/task/doc.go b/e2e/vendor/github.com/containerd/containerd/api/types/task/doc.go deleted file mode 100644 index e10c7a46993..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/task/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package task defines the task service. -package task diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/e2e/vendor/github.com/containerd/containerd/api/types/task/task.pb.go deleted file mode 100644 index 5c58d1ef187..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/task/task.pb.go +++ /dev/null @@ -1,406 +0,0 @@ -// -//Copyright The containerd Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/containerd/api/types/task/task.proto - -package task - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Status int32 - -const ( - Status_UNKNOWN Status = 0 - Status_CREATED Status = 1 - Status_RUNNING Status = 2 - Status_STOPPED Status = 3 - Status_PAUSED Status = 4 - Status_PAUSING Status = 5 -) - -// Enum value maps for Status. -var ( - Status_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATED", - 2: "RUNNING", - 3: "STOPPED", - 4: "PAUSED", - 5: "PAUSING", - } - Status_value = map[string]int32{ - "UNKNOWN": 0, - "CREATED": 1, - "RUNNING": 2, - "STOPPED": 3, - "PAUSED": 4, - "PAUSING": 5, - } -) - -func (x Status) Enum() *Status { - p := new(Status) - *p = x - return p -} - -func (x Status) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Status) Descriptor() protoreflect.EnumDescriptor { - return file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes[0].Descriptor() -} - -func (Status) Type() protoreflect.EnumType { - return &file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes[0] -} - -func (x Status) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Status.Descriptor instead. 
-func (Status) EnumDescriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{0} -} - -type Process struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` - ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` -} - -func (x *Process) Reset() { - *x = Process{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Process) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Process) ProtoMessage() {} - -func (x *Process) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Process.ProtoReflect.Descriptor instead. -func (*Process) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{0} -} - -func (x *Process) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *Process) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *Process) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -func (x *Process) GetStatus() Status { - if x != nil { - return x.Status - } - return Status_UNKNOWN -} - -func (x *Process) GetStdin() string { - if x != nil { - return x.Stdin - } - return "" -} - -func (x *Process) GetStdout() string { - if x != nil { - return x.Stdout - } - return "" -} - -func (x *Process) GetStderr() string { - if x != nil { - return x.Stderr - } - return "" -} - -func (x *Process) GetTerminal() bool { - if x != nil { - return x.Terminal - } - return false -} - -func (x *Process) GetExitStatus() uint32 { - if x != nil { - return x.ExitStatus - } - return 0 -} - -func (x *Process) GetExitedAt() *timestamppb.Timestamp { - if x != nil { - return x.ExitedAt - } - return nil -} - -type ProcessInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // PID is the process ID. - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - // Info contains additional process information. - // - // Info varies by platform. 
- Info *anypb.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` -} - -func (x *ProcessInfo) Reset() { - *x = ProcessInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProcessInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessInfo) ProtoMessage() {} - -func (x *ProcessInfo) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessInfo.ProtoReflect.Descriptor instead. -func (*ProcessInfo) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{1} -} - -func (x *ProcessInfo) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -func (x *ProcessInfo) GetInfo() *anypb.Any { - if x != nil { - return x.Info - } - return nil -} - -var File_github_com_containerd_containerd_api_types_task_task_proto protoreflect.FileDescriptor - -var file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc = []byte{ - 0x0a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, - 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbf, 0x02, - 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, - 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x33, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, - 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 
0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, - 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, - 0x49, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, - 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, - 0x12, 0x28, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x2a, 0x55, 0x0a, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, - 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x53, - 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x55, 0x53, - 0x45, 0x44, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x41, 0x55, 0x53, 0x49, 0x4e, 0x47, 0x10, - 0x05, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, - 0x74, 0x61, 0x73, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_github_com_containerd_containerd_api_types_task_task_proto_rawDescOnce sync.Once - file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData = file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc -) - -func file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP() []byte { - file_github_com_containerd_containerd_api_types_task_task_proto_rawDescOnce.Do(func() { - file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData) - }) - return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData -} - -var file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_github_com_containerd_containerd_api_types_task_task_proto_goTypes = []interface{}{ - (Status)(0), // 0: containerd.v1.types.Status - (*Process)(nil), // 1: containerd.v1.types.Process - (*ProcessInfo)(nil), // 2: containerd.v1.types.ProcessInfo - (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp - (*anypb.Any)(nil), // 4: google.protobuf.Any -} -var file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs = []int32{ - 0, // 0: containerd.v1.types.Process.status:type_name -> 
containerd.v1.types.Status - 3, // 1: containerd.v1.types.Process.exited_at:type_name -> google.protobuf.Timestamp - 4, // 2: containerd.v1.types.ProcessInfo.info:type_name -> google.protobuf.Any - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_containerd_api_types_task_task_proto_init() } -func file_github_com_containerd_containerd_api_types_task_task_proto_init() { - if File_github_com_containerd_containerd_api_types_task_task_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Process); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcessInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc, - NumEnums: 1, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_containerd_api_types_task_task_proto_goTypes, - DependencyIndexes: file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs, - EnumInfos: file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes, - MessageInfos: file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes, - }.Build() - File_github_com_containerd_containerd_api_types_task_task_proto = out.File - file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc = nil - file_github_com_containerd_containerd_api_types_task_task_proto_goTypes = nil - file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/containerd/api/types/task/task.proto b/e2e/vendor/github.com/containerd/containerd/api/types/task/task.proto deleted file mode 100644 index afc8e94bb47..00000000000 --- a/e2e/vendor/github.com/containerd/containerd/api/types/task/task.proto +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -syntax = "proto3"; - -package containerd.v1.types; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/any.proto"; - -option go_package = "github.com/containerd/containerd/api/types/task"; - -enum Status { - UNKNOWN = 0; - CREATED = 1; - RUNNING = 2; - STOPPED = 3; - PAUSED = 4; - PAUSING = 5; -} - -message Process { - string container_id = 1; - string id = 2; - uint32 pid = 3; - Status status = 4; - string stdin = 5; - string stdout = 6; - string stderr = 7; - bool terminal = 8; - uint32 exit_status = 9; - google.protobuf.Timestamp exited_at = 10; -} - -message ProcessInfo { - // PID is the process ID. - uint32 pid = 1; - // Info contains additional process information. - // - // Info varies by platform. - google.protobuf.Any info = 2; -} diff --git a/e2e/vendor/github.com/containerd/errdefs/LICENSE b/e2e/vendor/github.com/containerd/errdefs/LICENSE deleted file mode 100644 index 584149b6ee2..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/github.com/containerd/errdefs/README.md b/e2e/vendor/github.com/containerd/errdefs/README.md deleted file mode 100644 index bd418c63f98..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# errdefs - -A Go package for defining and checking common containerd errors. - -## Project details - -**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/e2e/vendor/github.com/containerd/errdefs/errors.go b/e2e/vendor/github.com/containerd/errdefs/errors.go deleted file mode 100644 index f654d196496..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/errors.go +++ /dev/null @@ -1,443 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with fmt.Errorf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -package errdefs - -import ( - "context" - "errors" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. -// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// These errors map closely to grpc errors. 
-var ( - ErrUnknown = errUnknown{} - ErrInvalidArgument = errInvalidArgument{} - ErrNotFound = errNotFound{} - ErrAlreadyExists = errAlreadyExists{} - ErrPermissionDenied = errPermissionDenied{} - ErrResourceExhausted = errResourceExhausted{} - ErrFailedPrecondition = errFailedPrecondition{} - ErrConflict = errConflict{} - ErrNotModified = errNotModified{} - ErrAborted = errAborted{} - ErrOutOfRange = errOutOfRange{} - ErrNotImplemented = errNotImplemented{} - ErrInternal = errInternal{} - ErrUnavailable = errUnavailable{} - ErrDataLoss = errDataLoss{} - ErrUnauthenticated = errUnauthorized{} -) - -// cancelled maps to Moby's "ErrCancelled" -type cancelled interface { - Cancelled() -} - -// IsCanceled returns true if the error is due to `context.Canceled`. -func IsCanceled(err error) bool { - return errors.Is(err, context.Canceled) || isInterface[cancelled](err) -} - -type errUnknown struct{} - -func (errUnknown) Error() string { return "unknown" } - -func (errUnknown) Unknown() {} - -func (e errUnknown) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// unknown maps to Moby's "ErrUnknown" -type unknown interface { - Unknown() -} - -// IsUnknown returns true if the error is due to an unknown error, -// unhandled condition or unexpected response. -func IsUnknown(err error) bool { - return errors.Is(err, errUnknown{}) || isInterface[unknown](err) -} - -type errInvalidArgument struct{} - -func (errInvalidArgument) Error() string { return "invalid argument" } - -func (errInvalidArgument) InvalidParameter() {} - -func (e errInvalidArgument) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// invalidParameter maps to Moby's "ErrInvalidParameter" -type invalidParameter interface { - InvalidParameter() -} - -// IsInvalidArgument returns true if the error is due to an invalid argument -func IsInvalidArgument(err error) bool { - return errors.Is(err, ErrInvalidArgument) || isInterface[invalidParameter](err) -} - -// deadlineExceed maps to Moby's "ErrDeadline" -type deadlineExceeded interface { - DeadlineExceeded() -} - -// IsDeadlineExceeded returns true if the error is due to -// `context.DeadlineExceeded`. 
-func IsDeadlineExceeded(err error) bool { - return errors.Is(err, context.DeadlineExceeded) || isInterface[deadlineExceeded](err) -} - -type errNotFound struct{} - -func (errNotFound) Error() string { return "not found" } - -func (errNotFound) NotFound() {} - -func (e errNotFound) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// notFound maps to Moby's "ErrNotFound" -type notFound interface { - NotFound() -} - -// IsNotFound returns true if the error is due to a missing object -func IsNotFound(err error) bool { - return errors.Is(err, ErrNotFound) || isInterface[notFound](err) -} - -type errAlreadyExists struct{} - -func (errAlreadyExists) Error() string { return "already exists" } - -func (errAlreadyExists) AlreadyExists() {} - -func (e errAlreadyExists) WithMessage(msg string) error { - return customMessage{e, msg} -} - -type alreadyExists interface { - AlreadyExists() -} - -// IsAlreadyExists returns true if the error is due to an already existing -// metadata item -func IsAlreadyExists(err error) bool { - return errors.Is(err, ErrAlreadyExists) || isInterface[alreadyExists](err) -} - -type errPermissionDenied struct{} - -func (errPermissionDenied) Error() string { return "permission denied" } - -func (errPermissionDenied) Forbidden() {} - -func (e errPermissionDenied) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// forbidden maps to Moby's "ErrForbidden" -type forbidden interface { - Forbidden() -} - -// IsPermissionDenied returns true if the error is due to permission denied -// or forbidden (403) response -func IsPermissionDenied(err error) bool { - return errors.Is(err, ErrPermissionDenied) || isInterface[forbidden](err) -} - -type errResourceExhausted struct{} - -func (errResourceExhausted) Error() string { return "resource exhausted" } - -func (errResourceExhausted) ResourceExhausted() {} - -func (e errResourceExhausted) WithMessage(msg string) error { - return customMessage{e, msg} -} - -type resourceExhausted interface { - ResourceExhausted() -} - -// IsResourceExhausted returns true if the error is due to -// a lack of resources or too many attempts. -func IsResourceExhausted(err error) bool { - return errors.Is(err, errResourceExhausted{}) || isInterface[resourceExhausted](err) -} - -type errFailedPrecondition struct{} - -func (e errFailedPrecondition) Error() string { return "failed precondition" } - -func (errFailedPrecondition) FailedPrecondition() {} - -func (e errFailedPrecondition) WithMessage(msg string) error { - return customMessage{e, msg} -} - -type failedPrecondition interface { - FailedPrecondition() -} - -// IsFailedPrecondition returns true if an operation could not proceed due to -// the lack of a particular condition -func IsFailedPrecondition(err error) bool { - return errors.Is(err, errFailedPrecondition{}) || isInterface[failedPrecondition](err) -} - -type errConflict struct{} - -func (errConflict) Error() string { return "conflict" } - -func (errConflict) Conflict() {} - -func (e errConflict) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// conflict maps to Moby's "ErrConflict" -type conflict interface { - Conflict() -} - -// IsConflict returns true if an operation could not proceed due to -// a conflict. 
-func IsConflict(err error) bool { - return errors.Is(err, errConflict{}) || isInterface[conflict](err) -} - -type errNotModified struct{} - -func (errNotModified) Error() string { return "not modified" } - -func (errNotModified) NotModified() {} - -func (e errNotModified) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// notModified maps to Moby's "ErrNotModified" -type notModified interface { - NotModified() -} - -// IsNotModified returns true if an operation could not proceed due -// to an object not modified from a previous state. -func IsNotModified(err error) bool { - return errors.Is(err, errNotModified{}) || isInterface[notModified](err) -} - -type errAborted struct{} - -func (errAborted) Error() string { return "aborted" } - -func (errAborted) Aborted() {} - -func (e errAborted) WithMessage(msg string) error { - return customMessage{e, msg} -} - -type aborted interface { - Aborted() -} - -// IsAborted returns true if an operation was aborted. -func IsAborted(err error) bool { - return errors.Is(err, errAborted{}) || isInterface[aborted](err) -} - -type errOutOfRange struct{} - -func (errOutOfRange) Error() string { return "out of range" } - -func (errOutOfRange) OutOfRange() {} - -func (e errOutOfRange) WithMessage(msg string) error { - return customMessage{e, msg} -} - -type outOfRange interface { - OutOfRange() -} - -// IsOutOfRange returns true if an operation could not proceed due -// to data being out of the expected range. -func IsOutOfRange(err error) bool { - return errors.Is(err, errOutOfRange{}) || isInterface[outOfRange](err) -} - -type errNotImplemented struct{} - -func (errNotImplemented) Error() string { return "not implemented" } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// notImplemented maps to Moby's "ErrNotImplemented" -type notImplemented interface { - NotImplemented() -} - -// IsNotImplemented returns true if the error is due to not being implemented -func IsNotImplemented(err error) bool { - return errors.Is(err, errNotImplemented{}) || isInterface[notImplemented](err) -} - -type errInternal struct{} - -func (errInternal) Error() string { return "internal" } - -func (errInternal) System() {} - -func (e errInternal) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// system maps to Moby's "ErrSystem" -type system interface { - System() -} - -// IsInternal returns true if the error returns to an internal or system error -func IsInternal(err error) bool { - return errors.Is(err, errInternal{}) || isInterface[system](err) -} - -type errUnavailable struct{} - -func (errUnavailable) Error() string { return "unavailable" } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// unavailable maps to Moby's "ErrUnavailable" -type unavailable interface { - Unavailable() -} - -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errors.Is(err, errUnavailable{}) || isInterface[unavailable](err) -} - -type errDataLoss struct{} - -func (errDataLoss) Error() string { return "data loss" } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// dataLoss maps to Moby's "ErrDataLoss" -type dataLoss interface { - DataLoss() -} - -// IsDataLoss returns true if data during an operation was lost or 
corrupted -func IsDataLoss(err error) bool { - return errors.Is(err, errDataLoss{}) || isInterface[dataLoss](err) -} - -type errUnauthorized struct{} - -func (errUnauthorized) Error() string { return "unauthorized" } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) WithMessage(msg string) error { - return customMessage{e, msg} -} - -// unauthorized maps to Moby's "ErrUnauthorized" -type unauthorized interface { - Unauthorized() -} - -// IsUnauthorized returns true if the error indicates that the user was -// unauthenticated or unauthorized. -func IsUnauthorized(err error) bool { - return errors.Is(err, errUnauthorized{}) || isInterface[unauthorized](err) -} - -func isInterface[T any](err error) bool { - for { - switch x := err.(type) { - case T: - return true - case customMessage: - err = x.err - case interface{ Unwrap() error }: - err = x.Unwrap() - if err == nil { - return false - } - case interface{ Unwrap() []error }: - for _, err := range x.Unwrap() { - if isInterface[T](err) { - return true - } - } - return false - default: - return false - } - } -} - -// customMessage is used to provide a defined error with a custom message. -// The message is not wrapped but can be compared by the `Is(error) bool` interface. -type customMessage struct { - err error - msg string -} - -func (c customMessage) Is(err error) bool { - return c.err == err -} - -func (c customMessage) As(target any) bool { - return errors.As(c.err, target) -} - -func (c customMessage) Error() string { - return c.msg -} diff --git a/e2e/vendor/github.com/containerd/errdefs/pkg/LICENSE b/e2e/vendor/github.com/containerd/errdefs/pkg/LICENSE deleted file mode 100644 index 584149b6ee2..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/pkg/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
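For reviewers unfamiliar with the vendored errdefs package whose errors.go is dropped above: it classifies errors both by sentinel value (via errors.Is) and by marker interface, so the classification survives wrapping. A minimal usage sketch, assuming only the public errdefs API shown in that file (the image name is illustrative, not from this patch):

package main

import (
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// Wrapping with %w preserves the classification of the sentinel.
	err := fmt.Errorf("image \"busybox\": %w", errdefs.ErrNotFound)

	// IsNotFound matches both the sentinel and any type carrying a
	// NotFound() marker method anywhere in the wrap chain.
	if errdefs.IsNotFound(err) {
		fmt.Println("treat as 404:", err)
	}
}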
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go b/e2e/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go deleted file mode 100644 index 59577595a23..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go +++ /dev/null @@ -1,353 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errgrpc provides utility functions for translating errors to -// and from a gRPC context. -// -// The functions ToGRPC and ToNative can be used to map server-side and -// client-side errors to the correct types. 
-package errgrpc
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-
-	spb "google.golang.org/genproto/googleapis/rpc/status"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/protobuf/proto"
-	"google.golang.org/protobuf/protoadapt"
-	"google.golang.org/protobuf/types/known/anypb"
-
-	"github.com/containerd/typeurl/v2"
-
-	"github.com/containerd/errdefs"
-	"github.com/containerd/errdefs/pkg/internal/cause"
-	"github.com/containerd/errdefs/pkg/internal/types"
-)
-
-// ToGRPC will attempt to map the error into a grpc error, from the error types
-// defined in the errdefs package, attempting to preserve the original
-// description. Any type which does not resolve to a defined error type will
-// be assigned the unknown error code.
-//
-// Further information may be extracted from certain errors depending on their
-// type. The grpc error details will be used to attempt to preserve as much of
-// the error structures and types as possible.
-//
-// Errors which can be marshaled using protobuf or typeurl will be considered
-// for inclusion as GRPC error details.
-// Additionally, use the following interfaces in errors to preserve custom types:
-//
-//	WrapError(error) error - Used to wrap the previous error
-//	JoinErrors(...error) error - Used to join all previous errors
-//	CollapseError() - Used for errors which carry information but
-//	should not have their error message shown.
-func ToGRPC(err error) error {
-	if err == nil {
-		return nil
-	}
-
-	if _, ok := status.FromError(err); ok {
-		// error has already been mapped to grpc
-		return err
-	}
-	st := statusFromError(err)
-	if st != nil {
-		if details := errorDetails(err, false); len(details) > 0 {
-			if ds, _ := st.WithDetails(details...); ds != nil {
-				st = ds
-			}
-		}
-		err = st.Err()
-	}
-	return err
-}
-
-func statusFromError(err error) *status.Status {
-	switch errdefs.Resolve(err) {
-	case errdefs.ErrInvalidArgument:
-		return status.New(codes.InvalidArgument, err.Error())
-	case errdefs.ErrNotFound:
-		return status.New(codes.NotFound, err.Error())
-	case errdefs.ErrAlreadyExists:
-		return status.New(codes.AlreadyExists, err.Error())
-	case errdefs.ErrPermissionDenied:
-		return status.New(codes.PermissionDenied, err.Error())
-	case errdefs.ErrResourceExhausted:
-		return status.New(codes.ResourceExhausted, err.Error())
-	case errdefs.ErrFailedPrecondition, errdefs.ErrConflict, errdefs.ErrNotModified:
-		return status.New(codes.FailedPrecondition, err.Error())
-	case errdefs.ErrAborted:
-		return status.New(codes.Aborted, err.Error())
-	case errdefs.ErrOutOfRange:
-		return status.New(codes.OutOfRange, err.Error())
-	case errdefs.ErrNotImplemented:
-		return status.New(codes.Unimplemented, err.Error())
-	case errdefs.ErrInternal:
-		return status.New(codes.Internal, err.Error())
-	case errdefs.ErrUnavailable:
-		return status.New(codes.Unavailable, err.Error())
-	case errdefs.ErrDataLoss:
-		return status.New(codes.DataLoss, err.Error())
-	case errdefs.ErrUnauthenticated:
-		return status.New(codes.Unauthenticated, err.Error())
-	case context.DeadlineExceeded:
-		return status.New(codes.DeadlineExceeded, err.Error())
-	case context.Canceled:
-		return status.New(codes.Canceled, err.Error())
-	case errdefs.ErrUnknown:
-		return status.New(codes.Unknown, err.Error())
-	}
-	return nil
-}
-
-// errorDetails returns an array of errors which make up the provided error.
-// If firstIncluded is true, then all encodable errors will be used, otherwise
-// the first error in an error list will not be used, to account for the
-// base status error which details are added to via wrap or join.
-//
-// The errors are ordered in a way that they can be applied in order by either
-// wrapping or joining the errors to recreate an error with the same structure
-// when `WrapError` and `JoinErrors` interfaces are used.
-//
-// The intent is that when re-applying the errors to create a single error, the
-// results of calls to `Error()`, `errors.Is`, `errors.As`, and "%+v" formatting
-// are the same as for the original error.
-func errorDetails(err error, firstIncluded bool) []protoadapt.MessageV1 {
-	switch uerr := err.(type) {
-	case interface{ Unwrap() error }:
-		details := errorDetails(uerr.Unwrap(), firstIncluded)
-
-		// If the type is able to wrap, then include if proto
-		if _, ok := err.(interface{ WrapError(error) error }); ok {
-			// Get proto message
-			if protoErr := toProtoMessage(err); protoErr != nil {
-				details = append(details, protoErr)
-			}
-		}
-
-		return details
-	case interface{ Unwrap() []error }:
-		var details []protoadapt.MessageV1
-		for i, e := range uerr.Unwrap() {
-			details = append(details, errorDetails(e, firstIncluded || i > 0)...)
-		}
-
-		if _, ok := err.(interface{ JoinErrors(...error) error }); ok {
-			// Get proto message
-			if protoErr := toProtoMessage(err); protoErr != nil {
-				details = append(details, protoErr)
-			}
-		}
-		return details
-	}
-
-	if firstIncluded {
-		if protoErr := toProtoMessage(err); protoErr != nil {
-			return []protoadapt.MessageV1{protoErr}
-		}
-		if gs, ok := status.FromError(ToGRPC(err)); ok {
-			return []protoadapt.MessageV1{gs.Proto()}
-		}
-		// TODO: Else include unknown extra error type?
-	}
-
-	return nil
-}
-
-func toProtoMessage(err error) protoadapt.MessageV1 {
-	// Do not double encode proto messages, otherwise use Any
-	if pm, ok := err.(protoadapt.MessageV1); ok {
-		return pm
-	}
-	if pm, ok := err.(proto.Message); ok {
-		return protoadapt.MessageV1Of(pm)
-	}
-
-	if reflect.TypeOf(err).Kind() == reflect.Ptr {
-		a, aerr := typeurl.MarshalAny(err)
-		if aerr == nil {
-			return &anypb.Any{
-				TypeUrl: a.GetTypeUrl(),
-				Value: a.GetValue(),
-			}
-		}
-	}
-	return nil
-}
-
-// ToGRPCf maps the error to grpc error codes, assembling the formatting string
-// and combining it with the target error string.
-//
-// This is equivalent to grpc.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
-func ToGRPCf(err error, format string, args ...interface{}) error {
-	return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
-}
-
-// ToNative returns the underlying error from a grpc service based on the grpc
-// error code. The grpc details are used to wrap the error in more context
-// or support multiple errors.
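As a rough illustration of the ToGRPC/ToNative round trip documented above — a sketch against the public errgrpc API shown in this hunk, not code that is part of the patch:

package main

import (
	"fmt"

	"github.com/containerd/errdefs"
	"github.com/containerd/errdefs/pkg/errgrpc"
)

func main() {
	orig := fmt.Errorf("snapshot busy: %w", errdefs.ErrFailedPrecondition)

	// Server side: map the error onto a gRPC status error.
	grpcErr := errgrpc.ToGRPC(orig)

	// Client side: recover an error that errdefs can classify again.
	native := errgrpc.ToNative(grpcErr)
	fmt.Println(errdefs.IsFailedPrecondition(native)) // true
}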
-func ToNative(err error) error { - if err == nil { - return nil - } - - s, isGRPC := status.FromError(err) - - var ( - desc string - code codes.Code - ) - - if isGRPC { - desc = s.Message() - code = s.Code() - } else { - desc = err.Error() - code = codes.Unknown - } - - var cls error // divide these into error classes, becomes the cause - - switch code { - case codes.InvalidArgument: - cls = errdefs.ErrInvalidArgument - case codes.AlreadyExists: - cls = errdefs.ErrAlreadyExists - case codes.NotFound: - cls = errdefs.ErrNotFound - case codes.Unavailable: - cls = errdefs.ErrUnavailable - case codes.FailedPrecondition: - // TODO: Has suffix is not sufficient for conflict and not modified - // Message should start with ": " or be at beginning of a line - // Message should end with ": " or be at the end of a line - // Compile a regex - if desc == errdefs.ErrConflict.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrConflict.Error()) { - cls = errdefs.ErrConflict - } else if desc == errdefs.ErrNotModified.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrNotModified.Error()) { - cls = errdefs.ErrNotModified - } else { - cls = errdefs.ErrFailedPrecondition - } - case codes.Unimplemented: - cls = errdefs.ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - case codes.Aborted: - cls = errdefs.ErrAborted - case codes.Unauthenticated: - cls = errdefs.ErrUnauthenticated - case codes.PermissionDenied: - cls = errdefs.ErrPermissionDenied - case codes.Internal: - cls = errdefs.ErrInternal - case codes.DataLoss: - cls = errdefs.ErrDataLoss - case codes.OutOfRange: - cls = errdefs.ErrOutOfRange - case codes.ResourceExhausted: - cls = errdefs.ErrResourceExhausted - default: - if idx := strings.LastIndex(desc, cause.UnexpectedStatusPrefix); idx > 0 { - if status, uerr := strconv.Atoi(desc[idx+len(cause.UnexpectedStatusPrefix):]); uerr == nil && status >= 200 && status < 600 { - cls = cause.ErrUnexpectedStatus{Status: status} - } - } - if cls == nil { - cls = errdefs.ErrUnknown - } - } - - msg := rebaseMessage(cls, desc) - if msg == "" { - err = cls - } else if msg != desc { - err = fmt.Errorf("%s: %w", msg, cls) - } else if wm, ok := cls.(interface{ WithMessage(string) error }); ok { - err = wm.WithMessage(msg) - } else { - err = fmt.Errorf("%s: %w", msg, cls) - } - - if isGRPC { - errs := []error{err} - for _, a := range s.Details() { - var derr error - - // First decode error if needed - if s, ok := a.(*spb.Status); ok { - derr = ToNative(status.ErrorProto(s)) - } else if e, ok := a.(error); ok { - derr = e - } else if dany, ok := a.(typeurl.Any); ok { - i, uerr := typeurl.UnmarshalAny(dany) - if uerr == nil { - if e, ok = i.(error); ok { - derr = e - } else { - derr = fmt.Errorf("non-error unmarshalled detail: %v", i) - } - } else { - derr = fmt.Errorf("error of type %q with failure to unmarshal: %v", dany.GetTypeUrl(), uerr) - } - } else { - derr = fmt.Errorf("non-error detail: %v", a) - } - - switch werr := derr.(type) { - case interface{ WrapError(error) error }: - errs[len(errs)-1] = werr.WrapError(errs[len(errs)-1]) - case interface{ JoinErrors(...error) error }: - // TODO: Consider whether this should support joining a subset - errs[0] = werr.JoinErrors(errs...) - case interface{ CollapseError() }: - errs[len(errs)-1] = types.CollapsedError(errs[len(errs)-1], derr) - default: - errs = append(errs, derr) - } - - } - if len(errs) > 1 { - err = errors.Join(errs...) 
- } else { - err = errs[0] - } - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. -func rebaseMessage(cls error, desc string) string { - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} diff --git a/e2e/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go b/e2e/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go deleted file mode 100644 index d88756bb065..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package cause is used to define root causes for errors -// common to errors packages like grpc and http. -package cause - -import "fmt" - -type ErrUnexpectedStatus struct { - Status int -} - -const UnexpectedStatusPrefix = "unexpected status " - -func (e ErrUnexpectedStatus) Error() string { - return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status) -} - -func (ErrUnexpectedStatus) Unknown() {} diff --git a/e2e/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go b/e2e/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go deleted file mode 100644 index a37e7722a8f..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package types - -import "fmt" - -// CollapsibleError indicates the error should be collapsed -type CollapsibleError interface { - CollapseError() -} - -// CollapsedError returns a new error with the collapsed -// error returned on unwrapped or when formatted with "%+v" -func CollapsedError(err error, collapsed ...error) error { - return collapsedError{err, collapsed} -} - -type collapsedError struct { - error - collapsed []error -} - -func (c collapsedError) Unwrap() []error { - return append([]error{c.error}, c.collapsed...) 
-} - -func (c collapsedError) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", c.error) - for _, err := range c.collapsed { - fmt.Fprintf(s, "\n%+v", err) - } - return - } - fallthrough - case 's': - fmt.Fprint(s, c.Error()) - case 'q': - fmt.Fprintf(s, "%q", c.Error()) - } -} diff --git a/e2e/vendor/github.com/containerd/errdefs/resolve.go b/e2e/vendor/github.com/containerd/errdefs/resolve.go deleted file mode 100644 index c02d4a73f4e..00000000000 --- a/e2e/vendor/github.com/containerd/errdefs/resolve.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import "context" - -// Resolve returns the first error found in the error chain which matches an -// error defined in this package or context error. A raw, unwrapped error is -// returned or ErrUnknown if no matching error is found. -// -// This is useful for determining a response code based on the outermost wrapped -// error rather than the original cause. For example, a not found error deep -// in the code may be wrapped as an invalid argument. When determining status -// code from Is* functions, the depth or ordering of the error is not -// considered. -// -// The search order is depth first, a wrapped error returned from any part of -// the chain from `Unwrap() error` will be returned before any joined errors -// as returned by `Unwrap() []error`. 
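The Resolve helper defined below walks the wrap/join chain depth-first and returns the bare sentinel. A short sketch of the intended behavior (a hypothetical caller, not part of this patch):

package main

import (
	"context"
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// A deeply wrapped chain still resolves to the underlying sentinel.
	err := fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", errdefs.ErrNotFound))
	fmt.Println(errdefs.Resolve(err) == errdefs.ErrNotFound) // true

	// Context errors are first-class members of the mapping.
	fmt.Println(errdefs.Resolve(context.Canceled) == context.Canceled) // true
}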
-func Resolve(err error) error { - if err == nil { - return nil - } - err = firstError(err) - if err == nil { - err = ErrUnknown - } - return err -} - -func firstError(err error) error { - for { - switch err { - case ErrUnknown, - ErrInvalidArgument, - ErrNotFound, - ErrAlreadyExists, - ErrPermissionDenied, - ErrResourceExhausted, - ErrFailedPrecondition, - ErrConflict, - ErrNotModified, - ErrAborted, - ErrOutOfRange, - ErrNotImplemented, - ErrInternal, - ErrUnavailable, - ErrDataLoss, - ErrUnauthenticated, - context.DeadlineExceeded, - context.Canceled: - return err - } - switch e := err.(type) { - case customMessage: - err = e.err - case unknown: - return ErrUnknown - case invalidParameter: - return ErrInvalidArgument - case notFound: - return ErrNotFound - case alreadyExists: - return ErrAlreadyExists - case forbidden: - return ErrPermissionDenied - case resourceExhausted: - return ErrResourceExhausted - case failedPrecondition: - return ErrFailedPrecondition - case conflict: - return ErrConflict - case notModified: - return ErrNotModified - case aborted: - return ErrAborted - case errOutOfRange: - return ErrOutOfRange - case notImplemented: - return ErrNotImplemented - case system: - return ErrInternal - case unavailable: - return ErrUnavailable - case dataLoss: - return ErrDataLoss - case unauthorized: - return ErrUnauthenticated - case deadlineExceeded: - return context.DeadlineExceeded - case cancelled: - return context.Canceled - case interface{ Unwrap() error }: - err = e.Unwrap() - if err == nil { - return nil - } - case interface{ Unwrap() []error }: - for _, ue := range e.Unwrap() { - if fe := firstError(ue); fe != nil { - return fe - } - } - return nil - case interface{ Is(error) bool }: - for _, target := range []error{ErrUnknown, - ErrInvalidArgument, - ErrNotFound, - ErrAlreadyExists, - ErrPermissionDenied, - ErrResourceExhausted, - ErrFailedPrecondition, - ErrConflict, - ErrNotModified, - ErrAborted, - ErrOutOfRange, - ErrNotImplemented, - ErrInternal, - ErrUnavailable, - ErrDataLoss, - ErrUnauthenticated, - context.DeadlineExceeded, - context.Canceled} { - if e.Is(target) { - return target - } - } - return nil - default: - return nil - } - } -} diff --git a/e2e/vendor/github.com/containerd/log/.golangci.yml b/e2e/vendor/github.com/containerd/log/.golangci.yml deleted file mode 100644 index a695775df49..00000000000 --- a/e2e/vendor/github.com/containerd/log/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters: - enable: - - exportloopref # Checks for pointers to enclosing loop variables - - gofmt - - goimports - - gosec - - ineffassign - - misspell - - nolintlint - - revive - - staticcheck - - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 - - unconvert - - unused - - vet - - dupword # Checks for duplicate words in the source code - disable: - - errcheck - -run: - timeout: 5m - skip-dirs: - - api - - cluster - - design - - docs - - docs/man - - releases - - reports - - test # e2e scripts diff --git a/e2e/vendor/github.com/containerd/log/LICENSE b/e2e/vendor/github.com/containerd/log/LICENSE deleted file mode 100644 index 584149b6ee2..00000000000 --- a/e2e/vendor/github.com/containerd/log/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/github.com/containerd/log/README.md b/e2e/vendor/github.com/containerd/log/README.md deleted file mode 100644 index 00e08498801..00000000000 --- a/e2e/vendor/github.com/containerd/log/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# log - -A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages. - -This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation. 
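Typical consumption of the package looks roughly like the sketch below, assuming only the WithLogger/G API defined in context.go further down; the field name is illustrative:

package main

import (
	"context"

	"github.com/containerd/log"
)

func main() {
	ctx := context.Background()
	// Attach a field-scoped logger to the context...
	ctx = log.WithLogger(ctx, log.L.WithField("module", "example"))
	// ...and retrieve it anywhere downstream via the G shorthand.
	log.G(ctx).Info("hello from containerd/log")
}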
-In the future this package may be replaced with a common go logging interface. - -## Project details - -**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. - diff --git a/e2e/vendor/github.com/containerd/log/context.go b/e2e/vendor/github.com/containerd/log/context.go deleted file mode 100644 index 20153066f3a..00000000000 --- a/e2e/vendor/github.com/containerd/log/context.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package log provides types and functions related to logging, passing -// loggers through a context, and attaching context to the logger. -// -// # Transitional types -// -// This package contains various types that are aliases for types in [logrus]. -// These aliases are intended for transitioning away from hard-coding logrus -// as logging implementation. Consumers of this package are encouraged to use -// the type-aliases from this package instead of directly using their logrus -// equivalent. -// -// The intent is to replace these aliases with locally defined types and -// interfaces once all consumers are no longer directly importing logrus -// types. -// -// IMPORTANT: due to the transitional purpose of this package, it is not -// guaranteed for the full logrus API to be provided in the future. As -// outlined, these aliases are provided as a step to transition away from -// a specific implementation which, as a result, exposes the full logrus API. -// While no decisions have been made on the ultimate design and interface -// provided by this package, we do not expect carrying "less common" features. -package log - -import ( - "context" - "fmt" - - "github.com/sirupsen/logrus" -) - -// G is a shorthand for [GetLogger]. -// -// We may want to define this locally to a package to get package tagged log -// messages. -var G = GetLogger - -// L is an alias for the standard logger. -var L = &Entry{ - Logger: logrus.StandardLogger(), - // Default is three fields plus a little extra room. - Data: make(Fields, 6), -} - -type loggerKey struct{} - -// Fields type to pass to "WithFields". -type Fields = map[string]any - -// Entry is a logging entry. It contains all the fields passed with -// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn, -// Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -// -// Entry is a transitional type, and currently an alias for [logrus.Entry]. 
-type Entry = logrus.Entry - -// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using -// zeros to ensure the formatted time is always the same number of -// characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// Level is a logging level. -type Level = logrus.Level - -// Supported log levels. -const ( - // TraceLevel level. Designates finer-grained informational events - // than [DebugLevel]. - TraceLevel Level = logrus.TraceLevel - - // DebugLevel level. Usually only enabled when debugging. Very verbose - // logging. - DebugLevel Level = logrus.DebugLevel - - // InfoLevel level. General operational entries about what's going on - // inside the application. - InfoLevel Level = logrus.InfoLevel - - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel Level = logrus.WarnLevel - - // ErrorLevel level. Logs errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel Level = logrus.ErrorLevel - - // FatalLevel level. Logs and then calls "logger.Exit(1)". It exits - // even if the logging level is set to Panic. - FatalLevel Level = logrus.FatalLevel - - // PanicLevel level. This is the highest level of severity. Logs and - // then calls panic with the message passed to Debug, Info, ... - PanicLevel Level = logrus.PanicLevel -) - -// SetLevel sets log level globally. It returns an error if the given -// level is not supported. -// -// level can be one of: -// -// - "trace" ([TraceLevel]) -// - "debug" ([DebugLevel]) -// - "info" ([InfoLevel]) -// - "warn" ([WarnLevel]) -// - "error" ([ErrorLevel]) -// - "fatal" ([FatalLevel]) -// - "panic" ([PanicLevel]) -func SetLevel(level string) error { - lvl, err := logrus.ParseLevel(level) - if err != nil { - return err - } - - L.Logger.SetLevel(lvl) - return nil -} - -// GetLevel returns the current log level. -func GetLevel() Level { - return L.Logger.GetLevel() -} - -// OutputFormat specifies a log output format. -type OutputFormat string - -// Supported log output formats. -const ( - // TextFormat represents the text logging format. - TextFormat OutputFormat = "text" - - // JSONFormat represents the JSON logging format. - JSONFormat OutputFormat = "json" -) - -// SetFormat sets the log output format ([TextFormat] or [JSONFormat]). -func SetFormat(format OutputFormat) error { - switch format { - case TextFormat: - L.Logger.SetFormatter(&logrus.TextFormatter{ - TimestampFormat: RFC3339NanoFixed, - FullTimestamp: true, - }) - return nil - case JSONFormat: - L.Logger.SetFormatter(&logrus.JSONFormatter{ - TimestampFormat: RFC3339NanoFixed, - }) - return nil - default: - return fmt.Errorf("unknown log format: %s", format) - } -} - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx)) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. 
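A short configuration sketch using the SetLevel/SetFormat helpers above; the chosen level and format are illustrative:

package main

import "github.com/containerd/log"

func main() {
	// Both helpers mutate the package-level standard logger L.
	if err := log.SetLevel("debug"); err != nil {
		panic(err)
	}
	if err := log.SetFormat(log.JSONFormat); err != nil {
		panic(err)
	}
	log.L.Debug("now visible at debug level, emitted as JSON")
}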
-func GetLogger(ctx context.Context) *Entry { - if logger := ctx.Value(loggerKey{}); logger != nil { - return logger.(*Entry) - } - return L.WithContext(ctx) -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/.gitattributes b/e2e/vendor/github.com/containerd/ttrpc/.gitattributes deleted file mode 100644 index d207b1802b2..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf diff --git a/e2e/vendor/github.com/containerd/ttrpc/.gitignore b/e2e/vendor/github.com/containerd/ttrpc/.gitignore deleted file mode 100644 index 88ceb2764bd..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -# Binaries for programs and plugins -/bin/ -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out -coverage.txt diff --git a/e2e/vendor/github.com/containerd/ttrpc/.golangci.yml b/e2e/vendor/github.com/containerd/ttrpc/.golangci.yml deleted file mode 100644 index 6462e52f66f..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/.golangci.yml +++ /dev/null @@ -1,52 +0,0 @@ -linters: - enable: - - staticcheck - - unconvert - - gofmt - - goimports - - revive - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -linters-settings: - revive: - ignore-generated-headers: true - rules: - - name: blank-imports - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: exported - - name: if-return - - name: increment-decrement - - name: var-naming - arguments: [["UID", "GID"], []] - - name: var-declaration - - name: package-comments - - name: range - - name: receiver-naming - - name: time-naming - - name: unexported-return - - name: indent-error-flow - - name: errorf - - name: empty-block - - name: superfluous-else - - name: unused-parameter - - name: unreachable-code - - name: redefines-builtin-id - -issues: - include: - - EXC0002 - -run: - timeout: 8m - skip-dirs: - - example diff --git a/e2e/vendor/github.com/containerd/ttrpc/LICENSE b/e2e/vendor/github.com/containerd/ttrpc/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/github.com/containerd/ttrpc/Makefile b/e2e/vendor/github.com/containerd/ttrpc/Makefile deleted file mode 100644 index c3a497dcac0..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/Makefile +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright The containerd Authors. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Go command to use for build -GO ?= go -INSTALL ?= install - -# Root directory of the project (absolute path). -ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) - -WHALE = "🇩" -ONI = "👹" - -# Project binaries. -COMMANDS=protoc-gen-go-ttrpc protoc-gen-gogottrpc - -ifdef BUILDTAGS - GO_BUILDTAGS = ${BUILDTAGS} -endif -GO_BUILDTAGS ?= -GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",) - -# Project packages. -PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /example) -TESTPACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /cmd | grep -v /integration | grep -v /example) -BINPACKAGES=$(addprefix ./cmd/,$(COMMANDS)) - -#Replaces ":" (*nix), ";" (windows) with newline for easy parsing -GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n") - -TESTFLAGS_RACE= -GO_BUILD_FLAGS= -# See Golang issue re: '-trimpath': https://github.com/golang/go/issues/13809 -GO_GCFLAGS=$(shell \ - set -- ${GOPATHS}; \ - echo "-gcflags=-trimpath=$${1}/src"; \ - ) - -BINARIES=$(addprefix bin/,$(COMMANDS)) - -# Flags passed to `go test` -TESTFLAGS ?= $(TESTFLAGS_RACE) $(EXTRA_TESTFLAGS) -TESTFLAGS_PARALLEL ?= 8 - -# Use this to replace `go test` with, for instance, `gotestsum` -GOTEST ?= $(GO) test - -.PHONY: clean all AUTHORS build binaries test integration generate protos check-protos coverage ci check help install vendor install-protobuf install-protobuild -.DEFAULT: default - -# Forcibly set the default goal to all, in case an include above brought in a rule definition. -.DEFAULT_GOAL := all - -all: binaries - -check: proto-fmt ## run all linters - @echo "$(WHALE) $@" - GOGC=75 golangci-lint run - -ci: check binaries check-protos coverage # coverage-integration ## to be used by the CI - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -generate: protos - @echo "$(WHALE) $@" - @PATH="${ROOTDIR}/bin:${PATH}" $(GO) generate -x ${PACKAGES} - -protos: bin/protoc-gen-gogottrpc bin/protoc-gen-go-ttrpc ## generate protobuf - @echo "$(WHALE) $@" - @(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES}) - -check-protos: protos ## check if protobufs needs to be generated again - @echo "$(WHALE) $@" - @test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \ - ((git diff | cat) && \ - (echo "$(ONI) please run 'make protos' when making changes to proto files" && false)) - -check-api-descriptors: protos ## check that protobuf changes aren't present. - @echo "$(WHALE) $@" - @test -z "$$(git status --short | grep ".pb.txt" | tee /dev/stderr)" || \ - ((git diff $$(find . -name '*.pb.txt') | cat) && \ - (echo "$(ONI) please run 'make protos' when making changes to proto files and check-in the generated descriptor file changes" && false)) - -proto-fmt: ## check format of proto files - @echo "$(WHALE) $@" - @test -z "$$(find . -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \ - (echo "$(ONI) please indent proto files with tabs only" && false) - @test -z "$$(find . 
-name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \ - (echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false) - -build: ## build the go packages - @echo "$(WHALE) $@" - @$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${EXTRA_FLAGS} ${PACKAGES} - -test: ## run tests, except integration tests and tests that require root - @echo "$(WHALE) $@" - @$(GOTEST) ${TESTFLAGS} ${TESTPACKAGES} - -integration: ## run integration tests - @echo "$(WHALE) $@" - @cd "${ROOTDIR}/integration" && $(GOTEST) -v ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} . - -benchmark: ## run benchmarks tests - @echo "$(WHALE) $@" - @$(GO) test ${TESTFLAGS} -bench . -run Benchmark - -FORCE: - -define BUILD_BINARY -@echo "$(WHALE) $@" -@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_TAGS} ./$< -endef - -# Build a binary from a cmd. -bin/%: cmd/% FORCE - $(call BUILD_BINARY) - -binaries: $(BINARIES) ## build binaries - @echo "$(WHALE) $@" - -clean: ## clean up binaries - @echo "$(WHALE) $@" - @rm -f $(BINARIES) - -install: ## install binaries - @echo "$(WHALE) $@ $(BINPACKAGES)" - @$(GO) install $(BINPACKAGES) - -install-protobuf: - @echo "$(WHALE) $@" - @script/install-protobuf - -install-protobuild: - @echo "$(WHALE) $@" - @$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 - @$(GO) install github.com/containerd/protobuild@14832ccc41429f5c4f81028e5af08aa233a219cf - -coverage: ## generate coverprofiles from the unit tests, except tests that require root - @echo "$(WHALE) $@" - @rm -f coverage.txt - @$(GO) test ${TESTFLAGS} ${TESTPACKAGES} 2> /dev/null - @( for pkg in ${PACKAGES}; do \ - $(GO) test ${TESTFLAGS} \ - -cover \ - -coverprofile=profile.out \ - -covermode=atomic $$pkg || exit; \ - if [ -f profile.out ]; then \ - cat profile.out >> coverage.txt; \ - rm profile.out; \ - fi; \ - done ) - -vendor: ## ensure all the go.mod/go.sum files are up-to-date - @echo "$(WHALE) $@" - @$(GO) mod tidy - @$(GO) mod verify - -verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date - @echo "$(WHALE) $@" - @$(GO) mod tidy - @$(GO) mod verify - @test -z "$$(git status --short | grep "go.sum" | tee /dev/stderr)" || \ - ((git diff | cat) && \ - (echo "$(ONI) make sure to checkin changes after go mod tidy" && false)) - -help: ## this help - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort diff --git a/e2e/vendor/github.com/containerd/ttrpc/PROTOCOL.md b/e2e/vendor/github.com/containerd/ttrpc/PROTOCOL.md deleted file mode 100644 index 12b43f6bd6e..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/PROTOCOL.md +++ /dev/null @@ -1,240 +0,0 @@ -# Protocol Specification - -The ttrpc protocol is client/server protocol to support multiple request streams -over a single connection with lightweight framing. The client represents the -process which initiated the underlying connection and the server is the process -which accepted the connection. The protocol is currently defined as -asymmetrical, with clients sending requests and servers sending responses. Both -clients and servers are able to send stream data. The roles are also used in -determining the stream identifiers, with client initiated streams using odd -number identifiers and server initiated using even number. 
The protocol may be -extended in the future to support server initiated streams, but that is not -supported in the latest version. - -## Purpose - -The ttrpc protocol is designed to be lightweight and optimized for low latency -and reliable connections between processes on the same host. The protocol does -not include features for handling unreliable connections such as handshakes, -resets, pings, or flow control. The protocol is designed to make low-overhead -implementations as simple as possible. It is not intended as a suitable -replacement for HTTP2/3 over the network. - -## Message Frame - -Each Message Frame consists of a 10-byte message header followed -by message data. The data length and stream ID are both big-endian -4-byte unsigned integers. The message type is an unsigned 1-byte -integer. The flags are also an unsigned 1-byte integer and -use is defined by the message type. - - +---------------------------------------------------------------+ - | Data Length (32) | - +---------------------------------------------------------------+ - | Stream ID (32) | - +---------------+-----------------------------------------------+ - | Msg Type (8) | - +---------------+ - | Flags (8) | - +---------------+-----------------------------------------------+ - | Data (*) | - +---------------------------------------------------------------+ - -The Data Length field represents the number of bytes in the Data field. The -total frame size will always be Data Length + 10 bytes. The maximum data length -is 4MB and any larger size should be rejected. Due to the maximum data size -being less than 16MB, the first frame byte should always be zero. This first -byte should be considered reserved for future use. - -The Stream ID must be odd for client initiated streams and even for server -initiated streams. Server initiated streams are not currently supported. - -## Message Types - -| Message Type | Name | Description | -|--------------|----------|----------------------------------| -| 0x01 | Request | Initiates stream | -| 0x02 | Response | Final stream data and terminates | -| 0x03 | Data | Stream data | - -### Request - -The request message is used to initiate a stream and send along request data for -properly routing and handling the stream. The stream may indicate unary without -any inbound or outbound stream data, with only a response expected on the -stream. The request may also indicate the stream is still open for more data and -no response is expected until data is finished. If the remote indicates the -stream is closed, the request may be considered non-unary but without any more -stream data sent. In the case of `remote closed`, the remote still expects to -receive a response or stream data. For compatibility with non-streaming clients, -a request with empty flags indicates a unary request. - -#### Request Flags - -| Flag | Name | Description | -|------|-----------------|--------------------------------------------------| -| 0x01 | `remote closed` | Non-unary, but no more data expected from remote | -| 0x02 | `remote open` | Non-unary, remote is still sending data | - -### Response - -The response message is used to end a stream with data, an empty response, or -an error. A response message is the only expected message after a unary request. -A non-unary request does not require a response message if the server is sending -back stream data. A non-unary stream may return a single response message, but no -other stream data may follow.
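To make the 10-byte header layout described in the Message Frame section above concrete, here is a minimal standalone Go sketch that encodes and decodes that header. The names (frameHeader, encodeHeader, decodeHeader) are illustrative assumptions, not taken from the vendored package; the deleted channel.go further below carries the actual implementation.

package main

import (
	"encoding/binary"
	"fmt"
)

// headerLength matches the fixed 10-byte header: a 4-byte big-endian data
// length, a 4-byte big-endian stream ID, a 1-byte type, and a 1-byte flags.
const headerLength = 10

// frameHeader is a hypothetical struct mirroring the layout above.
type frameHeader struct {
	Length   uint32 // bytes in the Data field, capped at 4MB by the spec
	StreamID uint32 // odd for client initiated, even for server initiated
	Type     uint8  // 0x01 request, 0x02 response, 0x03 data
	Flags    uint8  // interpretation depends on the message type
}

func encodeHeader(h frameHeader) [headerLength]byte {
	var b [headerLength]byte
	binary.BigEndian.PutUint32(b[0:4], h.Length)
	binary.BigEndian.PutUint32(b[4:8], h.StreamID)
	b[8] = h.Type
	b[9] = h.Flags
	return b
}

func decodeHeader(b [headerLength]byte) frameHeader {
	return frameHeader{
		Length:   binary.BigEndian.Uint32(b[0:4]),
		StreamID: binary.BigEndian.Uint32(b[4:8]),
		Type:     b[8],
		Flags:    b[9],
	}
}

func main() {
	// A unary request on client stream 1 with a 42-byte payload. Because the
	// data length is capped at 4MB, the first encoded byte is always zero.
	b := encodeHeader(frameHeader{Length: 42, StreamID: 1, Type: 0x01})
	fmt.Printf("%+v\n", decodeHeader(b)) // round-trips the same header
}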
- -#### Response Flags - -No response flags are defined at this time; flags should be empty. - -### Data - -The data message is used to send data on an already initialized stream. Either -client or server may send data. A data message is not allowed on a unary stream. -A data message should not be sent after indicating `remote closed` to the peer. -The last data message on a stream must set the `remote closed` flag. - -The `no data` flag is used to indicate that the data message does not include -any data. This is normally used with the `remote closed` flag to indicate the -stream is now closed without transmitting any data. Since ttrpc normally -transmits a single object per message, a zero length data message may be -interpreted as an empty object. For example, transmitting the number zero as a -protobuf message ends up with a data length of zero, but the message is still -considered data and should be processed. - -#### Data Flags - -| Flag | Name | Description | -|------|-----------------|-----------------------------------| -| 0x01 | `remote closed` | No more data expected from remote | -| 0x04 | `no data` | This message does not have data | - -## Streaming - -All ttrpc requests use streams to transfer data. Unary streams will only have -two messages sent per stream, a request from a client and a response from the -server. Non-unary streams, however, may send any number of messages from the -client and the server. This makes stream management more complicated than unary -streams since both client and server need to track additional state. To keep -this management as simple as possible, ttrpc minimizes the number of states and -uses two flags instead of control frames. Each stream has two states while a -stream is still alive: `local closed` and `remote closed`. Each peer considers -local and remote from their own perspective and sets flags from the other peer's -perspective. For example, if a client sends a data frame with the -`remote closed` flag, that indicates that the client is now `local closed` -and the server will be `remote closed`. A unary operation does not need to send -these flags since each received message always indicates `remote closed`. Once a -peer is both `local closed` and `remote closed`, the stream is considered -finished and may be cleaned up. - -Due to the asymmetric nature of the current protocol, a client should -always be in the `local closed` state before `remote closed` and a server should -always be in the `remote closed` state before `local closed`. This happens -because the client is always initiating requests and a client always expects a -final response back from a server to indicate the initiated request has been -fulfilled. This may mean the server sends a final empty response to finish a stream -even after it has already completed sending data before the client.
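The two-flag lifecycle described above can be illustrated with a small hypothetical state tracker. Again this is a sketch with made-up names (streamState, onSend, onRecv, finished); the vendored code keeps this bookkeeping inside its own stream type, and only the 0x01 flag value comes from the tables above.

package main

import "fmt"

// flagRemoteClosed is the 0x01 flag from the tables above: the sender is
// done transmitting, so the receiving peer sees it as closed.
const flagRemoteClosed uint8 = 0x01

// streamState is a hypothetical tracker for one stream's lifecycle.
type streamState struct {
	localClosed  bool // we have told the peer we are done sending
	remoteClosed bool // the peer has told us it is done sending
}

// onSend records the effect of a frame we transmit.
func (s *streamState) onSend(flags uint8) {
	if flags&flagRemoteClosed != 0 {
		s.localClosed = true // the peer now considers us "remote closed"
	}
}

// onRecv records the effect of a frame we receive.
func (s *streamState) onRecv(flags uint8) {
	if flags&flagRemoteClosed != 0 {
		s.remoteClosed = true
	}
}

// finished reports whether the stream may be cleaned up: both sides closed.
func (s *streamState) finished() bool {
	return s.localClosed && s.remoteClosed
}

func main() {
	var s streamState
	s.onSend(flagRemoteClosed) // the client closes its side first, per the spec
	s.onRecv(flagRemoteClosed) // the final response closes the server side
	fmt.Println(s.finished()) // prints: true
}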
- -### Unary State Diagram - - +--------+ +--------+ - | Client | | Server | - +---+----+ +----+---+ - | +---------+ | - local >---------------+ Request +--------------------> remote - closed | +---------+ | closed - | | - | +----------+ | - finished <--------------+ Response +--------------------< finished - | +----------+ | - | | - -### Non-Unary State Diagrams - -RC: `remote closed` flag -RO: `remote open` flag - - +--------+ +--------+ - | Client | | Server | - +---+----+ +----+---+ - | +--------------+ | - >-------------+ Request [RO] +-----------------> - | +--------------+ | - | | - | +------+ | - >-----------------+ Data +---------------------> - | +------+ | - | | - | +-----------+ | - local >---------------+ Data [RC] +------------------> remote - closed | +-----------+ | closed - | | - | +----------+ | - finished <--------------+ Response +--------------------< finished - | +----------+ | - | | - - +--------+ +--------+ - | Client | | Server | - +---+----+ +----+---+ - | +--------------+ | - local >-------------+ Request [RC] +-----------------> remote - closed | +--------------+ | closed - | | - | +------+ | - <-----------------+ Data +---------------------< - | +------+ | - | | - | +-----------+ | - finished <---------------+ Data [RC] +------------------< finished - | +-----------+ | - | | - - +--------+ +--------+ - | Client | | Server | - +---+----+ +----+---+ - | +--------------+ | - >-------------+ Request [RO] +-----------------> - | +--------------+ | - | | - | +------+ | - >-----------------+ Data +---------------------> - | +------+ | - | | - | +------+ | - <-----------------+ Data +---------------------< - | +------+ | - | | - | +------+ | - >-----------------+ Data +---------------------> - | +------+ | - | | - | +-----------+ | - local >---------------+ Data [RC] +------------------> remote - closed | +-----------+ | closed - | | - | +------+ | - <-----------------+ Data +---------------------< - | +------+ | - | | - | +-----------+ | - finished <---------------+ Data [RC] +------------------< finished - | +-----------+ | - | | - -## RPC - -While this protocol is defined primarily to support Remote Procedure Calls, the -protocol does not define the request and response types beyond the messages -defined in the protocol. The implementation provides a default protobuf -definition of request and response which may be used for cross language rpc. -All implementations should at least define a request type which support -routing by procedure name and a response type which supports call status. - -## Version History - -| Version | Features | -|---------|---------------------| -| 1.0 | Unary requests only | -| 1.2 | Streaming support | diff --git a/e2e/vendor/github.com/containerd/ttrpc/Protobuild.toml b/e2e/vendor/github.com/containerd/ttrpc/Protobuild.toml deleted file mode 100644 index 0f6ccbd1e81..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/Protobuild.toml +++ /dev/null @@ -1,28 +0,0 @@ -version = "2" -generators = ["go"] - -# Control protoc include paths. Below are usually some good defaults, but feel -# free to try it without them if it works for your project. -[includes] - # Include paths that will be added before all others. Typically, you want to - # treat the root of the project as an include, but this may not be necessary. - before = ["."] - - # Paths that will be added untouched to the end of the includes. We use - # `/usr/local/include` to pickup the common install location of protobuf. - # This is the default. 
- after = ["/usr/local/include"] - -# This section maps protobuf imports to Go packages. These will become -# `-M` directives in the call to the go protobuf generator. -[packages] - "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" - "proto/status.proto" = "google.golang.org/genproto/googleapis/rpc/status" - -[[overrides]] -# enable ttrpc and disable fieldpath and grpc for the shim -prefixes = ["github.com/containerd/ttrpc/integration/streaming"] -generators = ["go", "go-ttrpc"] - -[overrides.parameters.go-ttrpc] -prefix = "TTRPC" diff --git a/e2e/vendor/github.com/containerd/ttrpc/README.md b/e2e/vendor/github.com/containerd/ttrpc/README.md deleted file mode 100644 index ce95f63beef..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# ttrpc - -[![Build Status](https://github.com/containerd/ttrpc/actions/workflows/ci.yml/badge.svg)](https://github.com/containerd/ttrpc/actions/workflows/ci.yml) - -GRPC for low-memory environments. - -The existing grpc-go project requires a lot of memory overhead for importing -packages and at runtime. While this is great for many services with low density -requirements, this can be a problem when running a large number of services on -a single machine or on a machine with a small amount of memory. - -Using the same GRPC definitions, this project reduces the binary size and -protocol overhead required. We do this by eliding the `net/http`, `net/http2` -and `grpc` packages used by grpc, replacing them with a lightweight framing -protocol. The result is smaller binaries that use less resident memory with -the same ease of use as GRPC. - -Please note that while this project supports generating either end of the -protocol, the generated service definitions will be incompatible with regular -GRPC services, as they do not speak the same protocol. - -# Protocol - -See the [protocol specification](./PROTOCOL.md). - -# Usage - -Create a gogo vanity binary (see -[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an -example with the ttrpc plugin enabled). - -It's recommended to use [`protobuild`](https://github.com/containerd/protobuild) -to build the protobufs for this project, but this will work with protoc -directly, if required. - -# Differences from GRPC - -- The protocol stack has been replaced with a lighter protocol that doesn't - require http, http2 and tls. -- The client and server interfaces are identical, whereas in GRPC the - client and server interfaces are different. -- The Go stdlib context package is used instead. - -# Status - -TODO: - -- [ ] Add testing under concurrent load to ensure -- [ ] Verify connection error handling - -# Project details - -ttrpc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/e2e/vendor/github.com/containerd/ttrpc/channel.go b/e2e/vendor/github.com/containerd/ttrpc/channel.go deleted file mode 100644 index 872261e6de6..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/channel.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - Copyright The containerd Authors.
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "bufio" - "encoding/binary" - "fmt" - "io" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - messageHeaderLength = 10 - messageLengthMax = 4 << 20 -) - -type messageType uint8 - -const ( - messageTypeRequest messageType = 0x1 - messageTypeResponse messageType = 0x2 - messageTypeData messageType = 0x3 -) - -func (mt messageType) String() string { - switch mt { - case messageTypeRequest: - return "request" - case messageTypeResponse: - return "response" - case messageTypeData: - return "data" - default: - return "unknown" - } -} - -const ( - flagRemoteClosed uint8 = 0x1 - flagRemoteOpen uint8 = 0x2 - flagNoData uint8 = 0x4 -) - -// messageHeader represents the fixed-length message header of 10 bytes sent -// with every request. -type messageHeader struct { - Length uint32 // length excluding this header. b[:4] - StreamID uint32 // identifies which request stream message is a part of. b[4:8] - Type messageType // message type b[8] - Flags uint8 // type specific flags b[9] -} - -func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) { - _, err := io.ReadFull(r, p[:messageHeaderLength]) - if err != nil { - return messageHeader{}, err - } - - return messageHeader{ - Length: binary.BigEndian.Uint32(p[:4]), - StreamID: binary.BigEndian.Uint32(p[4:8]), - Type: messageType(p[8]), - Flags: p[9], - }, nil -} - -func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error { - binary.BigEndian.PutUint32(p[:4], mh.Length) - binary.BigEndian.PutUint32(p[4:8], mh.StreamID) - p[8] = byte(mh.Type) - p[9] = mh.Flags - - _, err := w.Write(p[:]) - return err -} - -var buffers sync.Pool - -type channel struct { - conn net.Conn - bw *bufio.Writer - br *bufio.Reader - hrbuf [messageHeaderLength]byte // avoid alloc when reading header - hwbuf [messageHeaderLength]byte -} - -func newChannel(conn net.Conn) *channel { - return &channel{ - conn: conn, - bw: bufio.NewWriter(conn), - br: bufio.NewReader(conn), - } -} - -// recv a message from the channel. The returned buffer contains the message. -// -// If a valid grpc status is returned, the message header -// returned will be valid and caller should send that along to -// the correct consumer. The bytes on the underlying channel -// will be discarded. 
-func (ch *channel) recv() (messageHeader, []byte, error) { - mh, err := readMessageHeader(ch.hrbuf[:], ch.br) - if err != nil { - return messageHeader{}, nil, err - } - - if mh.Length > uint32(messageLengthMax) { - if _, err := ch.br.Discard(int(mh.Length)); err != nil { - return mh, nil, fmt.Errorf("failed to discard after receiving oversized message: %w", err) - } - - return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax) - } - - var p []byte - if mh.Length > 0 { - p = ch.getmbuf(int(mh.Length)) - if _, err := io.ReadFull(ch.br, p); err != nil { - return messageHeader{}, nil, fmt.Errorf("failed reading message: %w", err) - } - } - - return mh, p, nil -} - -func (ch *channel) send(streamID uint32, t messageType, flags uint8, p []byte) error { - if len(p) > messageLengthMax { - return OversizedMessageError(len(p)) - } - - if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t, Flags: flags}); err != nil { - return err - } - - if len(p) > 0 { - _, err := ch.bw.Write(p) - if err != nil { - return err - } - } - - return ch.bw.Flush() -} - -func (ch *channel) getmbuf(size int) []byte { - // we can't use the standard New method on pool because we want to allocate - // based on size. - b, ok := buffers.Get().(*[]byte) - if !ok || cap(*b) < size { - // TODO(stevvooe): It may be better to allocate these in fixed length - // buckets to reduce fragmentation but its not clear that would help - // with performance. An ilogb approach or similar would work well. - bb := make([]byte, size) - b = &bb - } else { - *b = (*b)[:size] - } - return *b -} - -func (ch *channel) putmbuf(p []byte) { - buffers.Put(&p) -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/client.go b/e2e/vendor/github.com/containerd/ttrpc/client.go deleted file mode 100644 index b1bc7a3fc40..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/client.go +++ /dev/null @@ -1,570 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package ttrpc - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "strings" - "sync" - "syscall" - "time" - - "github.com/containerd/log" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// Client for a ttrpc server -type Client struct { - codec codec - conn net.Conn - channel *channel - - streamLock sync.RWMutex - streams map[streamID]*stream - nextStreamID streamID - sendLock sync.Mutex - - ctx context.Context - closed func() - - closeOnce sync.Once - userCloseFunc func() - userCloseWaitCh chan struct{} - - interceptor UnaryClientInterceptor -} - -// ClientOpts configures a client -type ClientOpts func(c *Client) - -// WithOnClose sets the close func whenever the client's Close() method is called -func WithOnClose(onClose func()) ClientOpts { - return func(c *Client) { - c.userCloseFunc = onClose - } -} - -// WithUnaryClientInterceptor sets the provided client interceptor -func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts { - return func(c *Client) { - c.interceptor = i - } -} - -// WithChainUnaryClientInterceptor sets the provided chain of client interceptors -func WithChainUnaryClientInterceptor(interceptors ...UnaryClientInterceptor) ClientOpts { - return func(c *Client) { - if len(interceptors) == 0 { - return - } - if c.interceptor != nil { - interceptors = append([]UnaryClientInterceptor{c.interceptor}, interceptors...) - } - c.interceptor = func( - ctx context.Context, - req *Request, - reply *Response, - info *UnaryClientInfo, - final Invoker, - ) error { - return interceptors[0](ctx, req, reply, info, - chainUnaryInterceptors(interceptors[1:], final, info)) - } - } -} - -func chainUnaryInterceptors(interceptors []UnaryClientInterceptor, final Invoker, info *UnaryClientInfo) Invoker { - if len(interceptors) == 0 { - return final - } - return func( - ctx context.Context, - req *Request, - reply *Response, - ) error { - return interceptors[0](ctx, req, reply, info, - chainUnaryInterceptors(interceptors[1:], final, info)) - } -} - -// NewClient creates a new ttrpc client using the given connection -func NewClient(conn net.Conn, opts ...ClientOpts) *Client { - ctx, cancel := context.WithCancel(context.Background()) - channel := newChannel(conn) - c := &Client{ - codec: codec{}, - conn: conn, - channel: channel, - streams: make(map[streamID]*stream), - nextStreamID: 1, - closed: cancel, - ctx: ctx, - userCloseFunc: func() {}, - userCloseWaitCh: make(chan struct{}), - } - - for _, o := range opts { - o(c) - } - - if c.interceptor == nil { - c.interceptor = defaultClientInterceptor - } - - go c.run() - return c -} - -func (c *Client) send(sid uint32, mt messageType, flags uint8, b []byte) error { - c.sendLock.Lock() - defer c.sendLock.Unlock() - return c.channel.send(sid, mt, flags, b) -} - -// Call makes a unary request and returns with response -func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { - payload, err := c.codec.Marshal(req) - if err != nil { - return err - } - - var ( - creq = &Request{ - Service: service, - Method: method, - Payload: payload, - // TODO: metadata from context - } - - cresp = &Response{} - ) - - if metadata, ok := GetMetadata(ctx); ok { - metadata.setRequest(creq) - } - - if dl, ok := ctx.Deadline(); ok { - creq.TimeoutNano = time.Until(dl).Nanoseconds() - } - - info := &UnaryClientInfo{ - FullMethod: fullPath(service, method), - } - if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil { - return err 
- } - - if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil { - return err - } - - if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) { - return status.ErrorProto(cresp.Status) - } - return nil -} - -// StreamDesc describes the stream properties, whether the stream has -// a streaming client, a streaming server, or both -type StreamDesc struct { - StreamingClient bool - StreamingServer bool -} - -// ClientStream is used to send or recv messages on the underlying stream -type ClientStream interface { - CloseSend() error - SendMsg(m interface{}) error - RecvMsg(m interface{}) error -} - -type clientStream struct { - ctx context.Context - s *stream - c *Client - desc *StreamDesc - localClosed bool - remoteClosed bool -} - -func (cs *clientStream) CloseSend() error { - if !cs.desc.StreamingClient { - return fmt.Errorf("%w: cannot close non-streaming client", ErrProtocol) - } - if cs.localClosed { - return ErrStreamClosed - } - err := cs.s.send(messageTypeData, flagRemoteClosed|flagNoData, nil) - if err != nil { - return filterCloseErr(err) - } - cs.localClosed = true - return nil -} - -func (cs *clientStream) SendMsg(m interface{}) error { - if !cs.desc.StreamingClient { - return fmt.Errorf("%w: cannot send data from non-streaming client", ErrProtocol) - } - if cs.localClosed { - return ErrStreamClosed - } - - var ( - payload []byte - err error - ) - if m != nil { - payload, err = cs.c.codec.Marshal(m) - if err != nil { - return err - } - } - - err = cs.s.send(messageTypeData, 0, payload) - if err != nil { - return filterCloseErr(err) - } - - return nil -} - -func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.remoteClosed { - return io.EOF - } - - var msg *streamMessage - select { - case <-cs.ctx.Done(): - return cs.ctx.Err() - case <-cs.s.recvClose: - // If recv has a pending message, process that first - select { - case msg = <-cs.s.recv: - default: - return cs.s.recvErr - } - case msg = <-cs.s.recv: - } - - if msg.header.Type == messageTypeResponse { - resp := &Response{} - err := proto.Unmarshal(msg.payload[:msg.header.Length], resp) - // return the payload buffer for reuse - cs.c.channel.putmbuf(msg.payload) - if err != nil { - return err - } - - if err := cs.c.codec.Unmarshal(resp.Payload, m); err != nil { - return err - } - - if resp.Status != nil && resp.Status.Code != int32(codes.OK) { - return status.ErrorProto(resp.Status) - } - - cs.c.deleteStream(cs.s) - cs.remoteClosed = true - - return nil - } else if msg.header.Type == messageTypeData { - if !cs.desc.StreamingServer { - cs.c.deleteStream(cs.s) - cs.remoteClosed = true - return fmt.Errorf("received data from non-streaming server: %w", ErrProtocol) - } - if msg.header.Flags&flagRemoteClosed == flagRemoteClosed { - cs.c.deleteStream(cs.s) - cs.remoteClosed = true - - if msg.header.Flags&flagNoData == flagNoData { - return io.EOF - } - } - - err := cs.c.codec.Unmarshal(msg.payload[:msg.header.Length], m) - cs.c.channel.putmbuf(msg.payload) - if err != nil { - return err - } - return nil - } - - return fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) -} - -// Close closes the ttrpc connection and underlying connection -func (c *Client) Close() error { - c.closeOnce.Do(func() { - c.closed() - - c.conn.Close() - }) - return nil -} - -// UserOnCloseWait is used to block until the user's on-close callback -// finishes. 
-func (c *Client) UserOnCloseWait(ctx context.Context) error { - select { - case <-c.userCloseWaitCh: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (c *Client) run() { - err := c.receiveLoop() - c.Close() - c.cleanupStreams(err) - - c.userCloseFunc() - close(c.userCloseWaitCh) -} - -func (c *Client) receiveLoop() error { - for { - select { - case <-c.ctx.Done(): - return ErrClosed - default: - var ( - msg = &streamMessage{} - err error - ) - - msg.header, msg.payload, err = c.channel.recv() - if err != nil { - _, ok := status.FromError(err) - if !ok { - // treat all errors that are not an rpc status as terminal. - // all others poison the connection. - return filterCloseErr(err) - } - } - sid := streamID(msg.header.StreamID) - s := c.getStream(sid) - if s == nil { - log.G(c.ctx).WithField("stream", sid).Error("ttrpc: received message on inactive stream") - continue - } - - if err != nil { - s.closeWithError(err) - } else { - if err := s.receive(c.ctx, msg); err != nil { - log.G(c.ctx).WithFields(log.Fields{"error": err, "stream": sid}).Error("ttrpc: failed to handle message") - } - } - } - } -} - -// createStream creates a new stream and registers it with the client -// Introduce stream types for multiple or single response -func (c *Client) createStream(flags uint8, b []byte) (*stream, error) { - // sendLock must be held across both allocation of the stream ID and sending it across the wire. - // This ensures that new stream IDs sent on the wire are always increasing, which is a - // requirement of the TTRPC protocol. - // This use of sendLock could be split into another mutex that covers stream creation + first send, - // and just use sendLock to guard writing to the wire, but for now it seems simpler to have fewer mutexes. - c.sendLock.Lock() - defer c.sendLock.Unlock() - - // Check if closed since lock acquired to prevent adding - // anything after cleanup completes - select { - case <-c.ctx.Done(): - return nil, ErrClosed - default: - } - - var s *stream - if err := func() error { - // In the future this could be replaced with a sync.Map instead of streamLock+map. - c.streamLock.Lock() - defer c.streamLock.Unlock() - - // Check if closed since lock acquired to prevent adding - // anything after cleanup completes - select { - case <-c.ctx.Done(): - return ErrClosed - default: - } - - s = newStream(c.nextStreamID, c) - c.streams[s.id] = s - c.nextStreamID = c.nextStreamID + 2 - - return nil - }(); err != nil { - return nil, err - } - - if err := c.channel.send(uint32(s.id), messageTypeRequest, flags, b); err != nil { - return s, filterCloseErr(err) - } - - return s, nil -} - -func (c *Client) deleteStream(s *stream) { - c.streamLock.Lock() - delete(c.streams, s.id) - c.streamLock.Unlock() - s.closeWithError(nil) -} - -func (c *Client) getStream(sid streamID) *stream { - c.streamLock.RLock() - s := c.streams[sid] - c.streamLock.RUnlock() - return s -} - -func (c *Client) cleanupStreams(err error) { - c.streamLock.Lock() - defer c.streamLock.Unlock() - - for sid, s := range c.streams { - s.closeWithError(err) - delete(c.streams, sid) - } -} - -// filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when -// returning from call or handling errors from main read loop. -// -// This purposely ignores errors with a wrapped cause. 
-func filterCloseErr(err error) error { - switch { - case err == nil: - return nil - case err == io.EOF: - return ErrClosed - case errors.Is(err, io.ErrClosedPipe): - return ErrClosed - case errors.Is(err, io.EOF): - return ErrClosed - case strings.Contains(err.Error(), "use of closed network connection"): - return ErrClosed - default: - // if we have an epipe on a write or econnreset on a read , we cast to errclosed - var oerr *net.OpError - if errors.As(err, &oerr) { - if (oerr.Op == "write" && errors.Is(err, syscall.EPIPE)) || - (oerr.Op == "read" && errors.Is(err, syscall.ECONNRESET)) { - return ErrClosed - } - } - } - - return err -} - -// NewStream creates a new stream with the given stream descriptor to the -// specified service and method. If not a streaming client, the request object -// may be provided. -func (c *Client) NewStream(ctx context.Context, desc *StreamDesc, service, method string, req interface{}) (ClientStream, error) { - var payload []byte - if req != nil { - var err error - payload, err = c.codec.Marshal(req) - if err != nil { - return nil, err - } - } - - request := &Request{ - Service: service, - Method: method, - Payload: payload, - // TODO: metadata from context - } - p, err := c.codec.Marshal(request) - if err != nil { - return nil, err - } - - var flags uint8 - if desc.StreamingClient { - flags = flagRemoteOpen - } else { - flags = flagRemoteClosed - } - s, err := c.createStream(flags, p) - if err != nil { - return nil, err - } - - return &clientStream{ - ctx: ctx, - s: s, - c: c, - desc: desc, - }, nil -} - -func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { - p, err := c.codec.Marshal(req) - if err != nil { - return err - } - - s, err := c.createStream(0, p) - if err != nil { - return err - } - defer c.deleteStream(s) - - var msg *streamMessage - select { - case <-ctx.Done(): - return ctx.Err() - case <-c.ctx.Done(): - return ErrClosed - case <-s.recvClose: - // If recv has a pending message, process that first - select { - case msg = <-s.recv: - default: - return s.recvErr - } - case msg = <-s.recv: - } - - if msg.header.Type == messageTypeResponse { - err = proto.Unmarshal(msg.payload[:msg.header.Length], resp) - } else { - err = fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) - } - - // return the payload buffer for reuse - c.channel.putmbuf(msg.payload) - - return err -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/codec.go b/e2e/vendor/github.com/containerd/ttrpc/codec.go deleted file mode 100644 index 3e82722a424..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package ttrpc - -import ( - "fmt" - - "google.golang.org/protobuf/proto" -) - -type codec struct{} - -func (c codec) Marshal(msg interface{}) ([]byte, error) { - switch v := msg.(type) { - case proto.Message: - return proto.Marshal(v) - default: - return nil, fmt.Errorf("ttrpc: cannot marshal unknown type: %T", msg) - } -} - -func (c codec) Unmarshal(p []byte, msg interface{}) error { - switch v := msg.(type) { - case proto.Message: - return proto.Unmarshal(p, v) - default: - return fmt.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg) - } -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/config.go b/e2e/vendor/github.com/containerd/ttrpc/config.go deleted file mode 100644 index f401f67be0f..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/config.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "errors" -) - -type serverConfig struct { - handshaker Handshaker - interceptor UnaryServerInterceptor -} - -// ServerOpt for configuring a ttrpc server -type ServerOpt func(*serverConfig) error - -// WithServerHandshaker can be passed to NewServer to ensure that the -// handshaker is called before every connection attempt. -// -// Only one handshaker is allowed per server. -func WithServerHandshaker(handshaker Handshaker) ServerOpt { - return func(c *serverConfig) error { - if c.handshaker != nil { - return errors.New("only one handshaker allowed per server") - } - c.handshaker = handshaker - return nil - } -} - -// WithUnaryServerInterceptor sets the provided interceptor on the server -func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt { - return func(c *serverConfig) error { - if c.interceptor != nil { - return errors.New("only one unchained interceptor allowed per server") - } - c.interceptor = i - return nil - } -} - -// WithChainUnaryServerInterceptor sets the provided chain of server interceptors -func WithChainUnaryServerInterceptor(interceptors ...UnaryServerInterceptor) ServerOpt { - return func(c *serverConfig) error { - if len(interceptors) == 0 { - return nil - } - if c.interceptor != nil { - interceptors = append([]UnaryServerInterceptor{c.interceptor}, interceptors...) 
- } - c.interceptor = func( - ctx context.Context, - unmarshal Unmarshaler, - info *UnaryServerInfo, - method Method) (interface{}, error) { - return interceptors[0](ctx, unmarshal, info, - chainUnaryServerInterceptors(info, method, interceptors[1:])) - } - return nil - } -} - -func chainUnaryServerInterceptors(info *UnaryServerInfo, method Method, interceptors []UnaryServerInterceptor) Method { - if len(interceptors) == 0 { - return method - } - return func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - return interceptors[0](ctx, unmarshal, info, - chainUnaryServerInterceptors(info, method, interceptors[1:])) - } -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/doc.go b/e2e/vendor/github.com/containerd/ttrpc/doc.go deleted file mode 100644 index d80cd424cc8..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* -package ttrpc defines and implements a low level simple transfer protocol -optimized for low latency and reliable connections between processes on the same -host. The protocol uses simple framing for sending requests, responses, and data -using multiple streams. -*/ -package ttrpc diff --git a/e2e/vendor/github.com/containerd/ttrpc/errors.go b/e2e/vendor/github.com/containerd/ttrpc/errors.go deleted file mode 100644 index 632dbe8bdf5..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/errors.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "errors" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var ( - // ErrProtocol is a general error in the handling the protocol. - ErrProtocol = errors.New("protocol error") - - // ErrClosed is returned by client methods when the underlying connection is - // closed. - ErrClosed = errors.New("ttrpc: closed") - - // ErrServerClosed is returned when the Server has closed its connection. - ErrServerClosed = errors.New("ttrpc: server closed") - - // ErrStreamClosed is when the streaming connection is closed. - ErrStreamClosed = errors.New("ttrpc: stream closed") -) - -// OversizedMessageErr is used to indicate refusal to send an oversized message. -// It wraps a ResourceExhausted grpc Status together with the offending message -// length. 
-type OversizedMessageErr struct { - messageLength int - err error -} - -// OversizedMessageError returns an OversizedMessageErr error for the given message -// length if it exceeds the allowed maximum. Otherwise a nil error is returned. -func OversizedMessageError(messageLength int) error { - if messageLength <= messageLengthMax { - return nil - } - - return &OversizedMessageErr{ - messageLength: messageLength, - err: status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", messageLength, messageLengthMax), - } -} - -// Error returns the error message for the corresponding grpc Status for the error. -func (e *OversizedMessageErr) Error() string { - return e.err.Error() -} - -// Unwrap returns the corresponding error with our grpc status code. -func (e *OversizedMessageErr) Unwrap() error { - return e.err -} - -// RejectedLength retrieves the rejected message length which triggered the error. -func (e *OversizedMessageErr) RejectedLength() int { - return e.messageLength -} - -// MaximumLength retrieves the maximum allowed message length that triggered the error. -func (*OversizedMessageErr) MaximumLength() int { - return messageLengthMax -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/handshake.go b/e2e/vendor/github.com/containerd/ttrpc/handshake.go deleted file mode 100644 index 3c6b610d35d..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/handshake.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "net" -) - -// Handshaker defines the interface for connection handshakes performed on the -// server or client when first connecting. -type Handshaker interface { - // Handshake should confirm or decorate a connection that may be incoming - // to a server or outgoing from a client. - // - // If this returns without an error, the caller should use the connection - // in place of the original connection. - // - // The second return value can contain credential specific data, such as - // unix socket credentials or TLS information. - // - // While we currently only have implementations on the server-side, this - // interface should be sufficient to implement similar handshakes on the - // client-side. - Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) -} - -type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) - -func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { - return fn(ctx, conn) -} - -func noopHandshake(_ context.Context, conn net.Conn) (net.Conn, interface{}, error) { - return conn, nil, nil -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/interceptor.go b/e2e/vendor/github.com/containerd/ttrpc/interceptor.go deleted file mode 100644 index 7ff5e9d33f2..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/interceptor.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import "context" - -// UnaryServerInfo provides information about the server request -type UnaryServerInfo struct { - FullMethod string -} - -// UnaryClientInfo provides information about the client request -type UnaryClientInfo struct { - FullMethod string -} - -// StreamServerInfo provides information about the server request -type StreamServerInfo struct { - FullMethod string - StreamingClient bool - StreamingServer bool -} - -// Unmarshaler contains the server request data and allows it to be unmarshaled -// into a concrete type -type Unmarshaler func(interface{}) error - -// Invoker invokes the client's request and response from the ttrpc server -type Invoker func(context.Context, *Request, *Response) error - -// UnaryServerInterceptor specifies the interceptor function for server request/response -type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error) - -// UnaryClientInterceptor specifies the interceptor function for client request/response -type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error - -func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, _ *UnaryServerInfo, method Method) (interface{}, error) { - return method(ctx, unmarshal) -} - -func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { - return invoker(ctx, req, resp) -} - -type StreamServerInterceptor func(context.Context, StreamServer, *StreamServerInfo, StreamHandler) (interface{}, error) - -func defaultStreamServerInterceptor(ctx context.Context, ss StreamServer, _ *StreamServerInfo, stream StreamHandler) (interface{}, error) { - return stream(ctx, ss) -} - -type StreamClientInterceptor func(context.Context) diff --git a/e2e/vendor/github.com/containerd/ttrpc/metadata.go b/e2e/vendor/github.com/containerd/ttrpc/metadata.go deleted file mode 100644 index ce8c0d13c41..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/metadata.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "strings" -) - -// MD is the user type for ttrpc metadata -type MD map[string][]string - -// Get returns the metadata for a given key when they exist. -// If there is no metadata, a nil slice and false are returned. 
-func (m MD) Get(key string) ([]string, bool) { - key = strings.ToLower(key) - list, ok := m[key] - if !ok || len(list) == 0 { - return nil, false - } - - return list, true -} - -// Set sets the provided values for a given key. -// The values will overwrite any existing values. -// If no values provided, a key will be deleted. -func (m MD) Set(key string, values ...string) { - key = strings.ToLower(key) - if len(values) == 0 { - delete(m, key) - return - } - m[key] = values -} - -// Append appends additional values to the given key. -func (m MD) Append(key string, values ...string) { - key = strings.ToLower(key) - if len(values) == 0 { - return - } - current, ok := m[key] - if ok { - m.Set(key, append(current, values...)...) - } else { - m.Set(key, values...) - } -} - -func (m MD) setRequest(r *Request) { - for k, values := range m { - for _, v := range values { - r.Metadata = append(r.Metadata, &KeyValue{ - Key: k, - Value: v, - }) - } - } -} - -func (m MD) fromRequest(r *Request) { - for _, kv := range r.Metadata { - m[kv.Key] = append(m[kv.Key], kv.Value) - } -} - -type metadataKey struct{} - -// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata) -func GetMetadata(ctx context.Context) (MD, bool) { - metadata, ok := ctx.Value(metadataKey{}).(MD) - return metadata, ok -} - -// GetMetadataValue gets a specific metadata value by name from context.Context -func GetMetadataValue(ctx context.Context, name string) (string, bool) { - metadata, ok := GetMetadata(ctx) - if !ok { - return "", false - } - - if list, ok := metadata.Get(name); ok { - return list[0], true - } - - return "", false -} - -// WithMetadata attaches metadata map to a context.Context -func WithMetadata(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, metadataKey{}, md) -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/request.pb.go b/e2e/vendor/github.com/containerd/ttrpc/request.pb.go deleted file mode 100644 index 3921ae5a356..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/request.pb.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.20.1 -// source: github.com/containerd/ttrpc/request.proto - -package ttrpc - -import ( - status "google.golang.org/genproto/googleapis/rpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Request struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` - Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` - TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,json=timeoutNano,proto3" json:"timeout_nano,omitempty"` - Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *Request) Reset() { - *x = Request{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Request) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Request) ProtoMessage() {} - -func (x *Request) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Request.ProtoReflect.Descriptor instead. -func (*Request) Descriptor() ([]byte, []int) { - return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{0} -} - -func (x *Request) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *Request) GetMethod() string { - if x != nil { - return x.Method - } - return "" -} - -func (x *Request) GetPayload() []byte { - if x != nil { - return x.Payload - } - return nil -} - -func (x *Request) GetTimeoutNano() int64 { - if x != nil { - return x.TimeoutNano - } - return 0 -} - -func (x *Request) GetMetadata() []*KeyValue { - if x != nil { - return x.Metadata - } - return nil -} - -type Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *Response) Reset() { - *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Response) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Response) ProtoMessage() {} - -func (x *Response) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
-func (*Response) Descriptor() ([]byte, []int) { - return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{1} -} - -func (x *Response) GetStatus() *status.Status { - if x != nil { - return x.Status - } - return nil -} - -func (x *Response) GetPayload() []byte { - if x != nil { - return x.Payload - } - return nil -} - -type StringList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []string `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *StringList) Reset() { - *x = StringList{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StringList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StringList) ProtoMessage() {} - -func (x *StringList) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StringList.ProtoReflect.Descriptor instead. -func (*StringList) Descriptor() ([]byte, []int) { - return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{2} -} - -func (x *StringList) GetList() []string { - if x != nil { - return x.List - } - return nil -} - -type KeyValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *KeyValue) Reset() { - *x = KeyValue{} - if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *KeyValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*KeyValue) ProtoMessage() {} - -func (x *KeyValue) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. 
-func (*KeyValue) Descriptor() ([]byte, []int) { - return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{3} -} - -func (x *KeyValue) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *KeyValue) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -var File_github_com_containerd_ttrpc_request_proto protoreflect.FileDescriptor - -var file_github_com_containerd_ttrpc_request_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x74, 0x72, - 0x70, 0x63, 0x1a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4e, 0x61, 0x6e, - 0x6f, 0x12, 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x45, - 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x20, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x1d, 0x5a, 0x1b, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_github_com_containerd_ttrpc_request_proto_rawDescOnce sync.Once - file_github_com_containerd_ttrpc_request_proto_rawDescData = file_github_com_containerd_ttrpc_request_proto_rawDesc -) - -func file_github_com_containerd_ttrpc_request_proto_rawDescGZIP() []byte { - file_github_com_containerd_ttrpc_request_proto_rawDescOnce.Do(func() { - file_github_com_containerd_ttrpc_request_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_github_com_containerd_ttrpc_request_proto_rawDescData) - }) - return file_github_com_containerd_ttrpc_request_proto_rawDescData -} - -var file_github_com_containerd_ttrpc_request_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_github_com_containerd_ttrpc_request_proto_goTypes = []interface{}{ - (*Request)(nil), // 0: ttrpc.Request - (*Response)(nil), // 1: ttrpc.Response - (*StringList)(nil), // 2: ttrpc.StringList - (*KeyValue)(nil), // 3: ttrpc.KeyValue - (*status.Status)(nil), // 4: Status -} -var file_github_com_containerd_ttrpc_request_proto_depIdxs = []int32{ - 3, // 0: ttrpc.Request.metadata:type_name -> ttrpc.KeyValue - 4, // 1: ttrpc.Response.status:type_name -> Status - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_github_com_containerd_ttrpc_request_proto_init() } -func file_github_com_containerd_ttrpc_request_proto_init() { - if File_github_com_containerd_ttrpc_request_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_github_com_containerd_ttrpc_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_github_com_containerd_ttrpc_request_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_github_com_containerd_ttrpc_request_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_github_com_containerd_ttrpc_request_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KeyValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_containerd_ttrpc_request_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_containerd_ttrpc_request_proto_goTypes, - DependencyIndexes: file_github_com_containerd_ttrpc_request_proto_depIdxs, - MessageInfos: file_github_com_containerd_ttrpc_request_proto_msgTypes, - }.Build() - File_github_com_containerd_ttrpc_request_proto = out.File - file_github_com_containerd_ttrpc_request_proto_rawDesc = nil - file_github_com_containerd_ttrpc_request_proto_goTypes = nil - file_github_com_containerd_ttrpc_request_proto_depIdxs = nil -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/request.proto b/e2e/vendor/github.com/containerd/ttrpc/request.proto deleted file mode 100644 index 37da334fc2a..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/request.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package ttrpc; - -import "proto/status.proto"; - -option go_package = "github.com/containerd/ttrpc"; - -message Request { - string service = 1; - string method = 2; - 
bytes payload = 3; - int64 timeout_nano = 4; - repeated KeyValue metadata = 5; -} - -message Response { - Status status = 1; - bytes payload = 2; -} - -message StringList { - repeated string list = 1; -} - -message KeyValue { - string key = 1; - string value = 2; -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/server.go b/e2e/vendor/github.com/containerd/ttrpc/server.go deleted file mode 100644 index 26419831dac..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/server.go +++ /dev/null @@ -1,579 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "errors" - "io" - "math/rand" - "net" - "sync" - "sync/atomic" - "syscall" - "time" - - "github.com/containerd/log" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type Server struct { - config *serverConfig - services *serviceSet - codec codec - - mu sync.Mutex - listeners map[net.Listener]struct{} - connections map[*serverConn]struct{} // all connections to current state - done chan struct{} // marks point at which we stop serving requests -} - -func NewServer(opts ...ServerOpt) (*Server, error) { - config := &serverConfig{} - for _, opt := range opts { - if err := opt(config); err != nil { - return nil, err - } - } - if config.interceptor == nil { - config.interceptor = defaultServerInterceptor - } - - return &Server{ - config: config, - services: newServiceSet(config.interceptor), - done: make(chan struct{}), - listeners: make(map[net.Listener]struct{}), - connections: make(map[*serverConn]struct{}), - }, nil -} - -// Register registers a map of methods to method handlers -// TODO: Remove in 2.0, does not support streams -func (s *Server) Register(name string, methods map[string]Method) { - s.services.register(name, &ServiceDesc{Methods: methods}) -} - -func (s *Server) RegisterService(name string, desc *ServiceDesc) { - s.services.register(name, desc) -} - -func (s *Server) Serve(ctx context.Context, l net.Listener) error { - s.addListener(l) - defer s.closeListener(l) - - var ( - backoff time.Duration - handshaker = s.config.handshaker - ) - - if handshaker == nil { - handshaker = handshakerFunc(noopHandshake) - } - - for { - conn, err := l.Accept() - if err != nil { - select { - case <-s.done: - return ErrServerClosed - default: - } - - if terr, ok := err.(interface { - Temporary() bool - }); ok && terr.Temporary() { - if backoff == 0 { - backoff = time.Millisecond - } else { - backoff *= 2 - } - - if max := time.Second; backoff > max { - backoff = max - } - - sleep := time.Duration(rand.Int63n(int64(backoff))) - log.G(ctx).WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep) - time.Sleep(sleep) - continue - } - - return err - } - - backoff = 0 - - approved, handshake, err := handshaker.Handshake(ctx, conn) - if err != nil { - log.G(ctx).WithError(err).Error("ttrpc: refusing connection after handshake") - conn.Close() - continue - } - - sc, err := s.newConn(approved, handshake) - if err != nil { - 
log.G(ctx).WithError(err).Error("ttrpc: create connection failed") - conn.Close() - continue - } - - go sc.run(ctx) - } -} - -func (s *Server) Shutdown(ctx context.Context) error { - s.mu.Lock() - select { - case <-s.done: - default: - // protected by mutex - close(s.done) - } - lnerr := s.closeListeners() - s.mu.Unlock() - - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - for { - s.closeIdleConns() - - if s.countConnection() == 0 { - break - } - - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } - - return lnerr -} - -// Close the server without waiting for active connections. -func (s *Server) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - - select { - case <-s.done: - default: - // protected by mutex - close(s.done) - } - - err := s.closeListeners() - for c := range s.connections { - c.close() - delete(s.connections, c) - } - - return err -} - -func (s *Server) addListener(l net.Listener) { - s.mu.Lock() - defer s.mu.Unlock() - s.listeners[l] = struct{}{} -} - -func (s *Server) closeListener(l net.Listener) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.closeListenerLocked(l) -} - -func (s *Server) closeListenerLocked(l net.Listener) error { - defer delete(s.listeners, l) - return l.Close() -} - -func (s *Server) closeListeners() error { - var err error - for l := range s.listeners { - if cerr := s.closeListenerLocked(l); cerr != nil && err == nil { - err = cerr - } - } - return err -} - -func (s *Server) addConnection(c *serverConn) error { - s.mu.Lock() - defer s.mu.Unlock() - - select { - case <-s.done: - return ErrServerClosed - default: - } - - s.connections[c] = struct{}{} - return nil -} - -func (s *Server) delConnection(c *serverConn) { - s.mu.Lock() - defer s.mu.Unlock() - - delete(s.connections, c) -} - -func (s *Server) countConnection() int { - s.mu.Lock() - defer s.mu.Unlock() - - return len(s.connections) -} - -func (s *Server) closeIdleConns() { - s.mu.Lock() - defer s.mu.Unlock() - - for c := range s.connections { - if st, ok := c.getState(); !ok || st == connStateActive { - continue - } - c.close() - delete(s.connections, c) - } -} - -type connState int - -const ( - connStateActive = iota + 1 // outstanding requests - connStateIdle // no requests - connStateClosed // closed connection -) - -func (cs connState) String() string { - switch cs { - case connStateActive: - return "active" - case connStateIdle: - return "idle" - case connStateClosed: - return "closed" - default: - return "unknown" - } -} - -func (s *Server) newConn(conn net.Conn, handshake interface{}) (*serverConn, error) { - c := &serverConn{ - server: s, - conn: conn, - handshake: handshake, - shutdown: make(chan struct{}), - } - c.setState(connStateIdle) - if err := s.addConnection(c); err != nil { - c.close() - return nil, err - } - return c, nil -} - -type serverConn struct { - server *Server - conn net.Conn - handshake interface{} // data from handshake, not used for now - state atomic.Value - - shutdownOnce sync.Once - shutdown chan struct{} // forced shutdown, used by close -} - -func (c *serverConn) getState() (connState, bool) { - cs, ok := c.state.Load().(connState) - return cs, ok -} - -func (c *serverConn) setState(newstate connState) { - c.state.Store(newstate) -} - -func (c *serverConn) close() error { - c.shutdownOnce.Do(func() { - close(c.shutdown) - }) - - return nil -} - -func (c *serverConn) run(sctx context.Context) { - type ( - response struct { - id uint32 - status *status.Status - data []byte - closeStream bool - 
streaming bool - } - ) - - var ( - ch = newChannel(c.conn) - ctx, cancel = context.WithCancel(sctx) - state connState = connStateIdle - responses = make(chan response) - recvErr = make(chan error, 1) - done = make(chan struct{}) - streams = sync.Map{} - active int32 - lastStreamID uint32 - ) - - defer c.conn.Close() - defer cancel() - defer close(done) - defer c.server.delConnection(c) - - sendStatus := func(id uint32, st *status.Status) bool { - select { - case responses <- response{ - // even though we've had an invalid stream id, we send it - // back on the same stream id so the client knows which - // stream id was bad. - id: id, - status: st, - closeStream: true, - }: - return true - case <-c.shutdown: - return false - case <-done: - return false - } - } - - go func(recvErr chan error) { - defer close(recvErr) - for { - select { - case <-c.shutdown: - return - case <-done: - return - default: // proceed - } - - mh, p, err := ch.recv() - if err != nil { - status, ok := status.FromError(err) - if !ok { - recvErr <- err - return - } - - // in this case, we send an error for that particular message - // when the status is defined. - if !sendStatus(mh.StreamID, status) { - return - } - - continue - } - - if mh.StreamID%2 != 1 { - // enforce odd client initiated identifiers. - if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { - return - } - continue - } - - if mh.Type == messageTypeData { - i, ok := streams.Load(mh.StreamID) - if !ok { - if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID is no longer active")) { - return - } - } - sh := i.(*streamHandler) - if mh.Flags&flagNoData != flagNoData { - unmarshal := func(obj interface{}) error { - err := protoUnmarshal(p, obj) - ch.putmbuf(p) - return err - } - - if err := sh.data(unmarshal); err != nil { - if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data handling error: %v", err)) { - return - } - } - } - - if mh.Flags&flagRemoteClosed == flagRemoteClosed { - sh.closeSend() - if len(p) > 0 { - if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data close message cannot include data")) { - return - } - } - } - } else if mh.Type == messageTypeRequest { - if mh.StreamID <= lastStreamID { - // enforce odd client initiated identifiers. - if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID cannot be re-used and must increment")) { - return - } - continue - - } - lastStreamID = mh.StreamID - - // TODO: Make request type configurable - // Unmarshaller which takes in a byte array and returns an interface? - var req Request - if err := c.server.codec.Unmarshal(p, &req); err != nil { - ch.putmbuf(p) - if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { - return - } - continue - } - ch.putmbuf(p) - - id := mh.StreamID - respond := func(status *status.Status, data []byte, streaming, closeStream bool) error { - select { - case responses <- response{ - id: id, - status: status, - data: data, - closeStream: closeStream, - streaming: streaming, - }: - case <-done: - return ErrClosed - } - return nil - } - sh, err := c.server.services.handle(ctx, &req, respond) - if err != nil { - status, _ := status.FromError(err) - if !sendStatus(mh.StreamID, status) { - return - } - continue - } - - streams.Store(id, sh) - atomic.AddInt32(&active, 1) - } - // TODO: else we must ignore this for future compat. log this? 
- } - }(recvErr) - - for { - var ( - newstate connState - shutdown chan struct{} - ) - - activeN := atomic.LoadInt32(&active) - if activeN > 0 { - newstate = connStateActive - shutdown = nil - } else { - newstate = connStateIdle - shutdown = c.shutdown // only enable this branch in idle mode - } - if newstate != state { - c.setState(newstate) - state = newstate - } - - select { - case response := <-responses: - if !response.streaming || response.status.Code() != codes.OK { - p, err := c.server.codec.Marshal(&Response{ - Status: response.status.Proto(), - Payload: response.data, - }) - if err != nil { - log.G(ctx).WithError(err).Error("failed marshaling response") - return - } - - if err := ch.send(response.id, messageTypeResponse, 0, p); err != nil { - log.G(ctx).WithError(err).Error("failed sending message on channel") - return - } - } else { - var flags uint8 - if response.closeStream { - flags = flagRemoteClosed - } - if response.data == nil { - flags = flags | flagNoData - } - if err := ch.send(response.id, messageTypeData, flags, response.data); err != nil { - log.G(ctx).WithError(err).Error("failed sending message on channel") - return - } - } - - if response.closeStream { - // The ttrpc protocol currently does not support the case where - // the server is localClosed but not remoteClosed. Once the server - // is closing, the whole stream may be considered finished - streams.Delete(response.id) - atomic.AddInt32(&active, -1) - } - case err := <-recvErr: - // TODO(stevvooe): Not wildly clear what we should do in this - // branch. Basically, it means that we are no longer receiving - // requests due to a terminal error. - recvErr = nil // connection is now "closing" - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET) { - // The client went away and we should stop processing - // requests, so that the client connection is closed - return - } - log.G(ctx).WithError(err).Error("error receiving message") - // else, initiate shutdown - case <-shutdown: - return - } - } -} - -var noopFunc = func() {} - -func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) { - if len(req.Metadata) > 0 { - md := MD{} - md.fromRequest(req) - ctx = WithMetadata(ctx, md) - } - - cancel = noopFunc - if req.TimeoutNano == 0 { - return ctx, cancel - } - - ctx, cancel = context.WithTimeout(ctx, time.Duration(req.TimeoutNano)) - return ctx, cancel -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/services.go b/e2e/vendor/github.com/containerd/ttrpc/services.go deleted file mode 100644 index 6d092bf950f..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/services.go +++ /dev/null @@ -1,279 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package ttrpc - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "path" - "unsafe" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) - -type StreamHandler func(context.Context, StreamServer) (interface{}, error) - -type Stream struct { - Handler StreamHandler - StreamingClient bool - StreamingServer bool -} - -type ServiceDesc struct { - Methods map[string]Method - Streams map[string]Stream -} - -type serviceSet struct { - services map[string]*ServiceDesc - unaryInterceptor UnaryServerInterceptor - streamInterceptor StreamServerInterceptor -} - -func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { - return &serviceSet{ - services: make(map[string]*ServiceDesc), - unaryInterceptor: interceptor, - streamInterceptor: defaultStreamServerInterceptor, - } -} - -func (s *serviceSet) register(name string, desc *ServiceDesc) { - if _, ok := s.services[name]; ok { - panic(fmt.Errorf("duplicate service %v registered", name)) - } - - s.services[name] = desc -} - -func (s *serviceSet) unaryCall(ctx context.Context, method Method, info *UnaryServerInfo, data []byte) (p []byte, st *status.Status) { - unmarshal := func(obj interface{}) error { - return protoUnmarshal(data, obj) - } - - resp, err := s.unaryInterceptor(ctx, unmarshal, info, method) - if err == nil { - if isNil(resp) { - err = errors.New("ttrpc: marshal called with nil") - } else { - p, err = protoMarshal(resp) - } - } - - st, ok := status.FromError(err) - if !ok { - st = status.New(convertCode(err), err.Error()) - } - - return p, st -} - -func (s *serviceSet) streamCall(ctx context.Context, stream StreamHandler, info *StreamServerInfo, ss StreamServer) (p []byte, st *status.Status) { - resp, err := s.streamInterceptor(ctx, ss, info, stream) - if err == nil { - p, err = protoMarshal(resp) - } - st, ok := status.FromError(err) - if !ok { - st = status.New(convertCode(err), err.Error()) - } - return -} - -func (s *serviceSet) handle(ctx context.Context, req *Request, respond func(*status.Status, []byte, bool, bool) error) (*streamHandler, error) { - srv, ok := s.services[req.Service] - if !ok { - return nil, status.Errorf(codes.Unimplemented, "service %v", req.Service) - } - - if method, ok := srv.Methods[req.Method]; ok { - go func() { - ctx, cancel := getRequestContext(ctx, req) - defer cancel() - - info := &UnaryServerInfo{ - FullMethod: fullPath(req.Service, req.Method), - } - p, st := s.unaryCall(ctx, method, info, req.Payload) - - respond(st, p, false, true) - }() - return nil, nil - } - if stream, ok := srv.Streams[req.Method]; ok { - ctx, cancel := getRequestContext(ctx, req) - info := &StreamServerInfo{ - FullMethod: fullPath(req.Service, req.Method), - StreamingClient: stream.StreamingClient, - StreamingServer: stream.StreamingServer, - } - sh := &streamHandler{ - ctx: ctx, - respond: respond, - recv: make(chan Unmarshaler, 5), - info: info, - } - go func() { - defer cancel() - p, st := s.streamCall(ctx, stream.Handler, info, sh) - respond(st, p, stream.StreamingServer, true) - }() - - // Empty proto messages serialized to 0 payloads, - // so signatures like: rpc Stream(google.protobuf.Empty) returns (stream Data); - // don't get invoked here, which causes hang on client side. 
- // See https://github.com/containerd/ttrpc/issues/126 - if req.Payload != nil || !info.StreamingClient { - unmarshal := func(obj interface{}) error { - return protoUnmarshal(req.Payload, obj) - } - if err := sh.data(unmarshal); err != nil { - return nil, err - } - } - - return sh, nil - } - return nil, status.Errorf(codes.Unimplemented, "method %v", req.Method) -} - -type streamHandler struct { - ctx context.Context - respond func(*status.Status, []byte, bool, bool) error - recv chan Unmarshaler - info *StreamServerInfo - - remoteClosed bool - localClosed bool -} - -func (s *streamHandler) closeSend() { - if !s.remoteClosed { - s.remoteClosed = true - close(s.recv) - } -} - -func (s *streamHandler) data(unmarshal Unmarshaler) error { - if s.remoteClosed { - return ErrStreamClosed - } - select { - case s.recv <- unmarshal: - return nil - case <-s.ctx.Done(): - return s.ctx.Err() - } -} - -func (s *streamHandler) SendMsg(m interface{}) error { - if s.localClosed { - return ErrStreamClosed - } - p, err := protoMarshal(m) - if err != nil { - return err - } - return s.respond(nil, p, true, false) -} - -func (s *streamHandler) RecvMsg(m interface{}) error { - select { - case unmarshal, ok := <-s.recv: - if !ok { - return io.EOF - } - return unmarshal(m) - case <-s.ctx.Done(): - return s.ctx.Err() - - } -} - -func protoUnmarshal(p []byte, obj interface{}) error { - switch v := obj.(type) { - case proto.Message: - if err := proto.Unmarshal(p, v); err != nil { - return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) - } - default: - return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) - } - return nil -} - -func protoMarshal(obj interface{}) ([]byte, error) { - if obj == nil { - return nil, nil - } - - switch v := obj.(type) { - case proto.Message: - r, err := proto.Marshal(v) - if err != nil { - return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error()) - } - - return r, nil - default: - return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v) - } -} - -// convertCode maps stdlib go errors into grpc space. -// -// This is ripped from the grpc-go code base. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - -func fullPath(service, method string) string { - return "/" + path.Join(service, method) -} - -func isNil(resp interface{}) bool { - return (*[2]uintptr)(unsafe.Pointer(&resp))[1] == 0 -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/stream.go b/e2e/vendor/github.com/containerd/ttrpc/stream.go deleted file mode 100644 index 739a4c9675b..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/stream.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "sync" -) - -type streamID uint32 - -type streamMessage struct { - header messageHeader - payload []byte -} - -type stream struct { - id streamID - sender sender - recv chan *streamMessage - - closeOnce sync.Once - recvErr error - recvClose chan struct{} -} - -func newStream(id streamID, send sender) *stream { - return &stream{ - id: id, - sender: send, - recv: make(chan *streamMessage, 1), - recvClose: make(chan struct{}), - } -} - -func (s *stream) closeWithError(err error) error { - s.closeOnce.Do(func() { - if err != nil { - s.recvErr = err - } else { - s.recvErr = ErrClosed - } - close(s.recvClose) - }) - return nil -} - -func (s *stream) send(mt messageType, flags uint8, b []byte) error { - return s.sender.send(uint32(s.id), mt, flags, b) -} - -func (s *stream) receive(ctx context.Context, msg *streamMessage) error { - select { - case <-s.recvClose: - return s.recvErr - default: - } - select { - case <-s.recvClose: - return s.recvErr - case s.recv <- msg: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -type sender interface { - send(uint32, messageType, uint8, []byte) error -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/stream_server.go b/e2e/vendor/github.com/containerd/ttrpc/stream_server.go deleted file mode 100644 index b6d1ba720a4..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/stream_server.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -type StreamServer interface { - SendMsg(m interface{}) error - RecvMsg(m interface{}) error -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/test.proto b/e2e/vendor/github.com/containerd/ttrpc/test.proto deleted file mode 100644 index 0e114d55688..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/test.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package ttrpc; - -option go_package = "github.com/containerd/ttrpc/internal"; - -message TestPayload { - string foo = 1; - int64 deadline = 2; - string metadata = 3; -} - -message EchoPayload { - int64 seq = 1; - string msg = 2; -} diff --git a/e2e/vendor/github.com/containerd/ttrpc/unixcreds_linux.go b/e2e/vendor/github.com/containerd/ttrpc/unixcreds_linux.go deleted file mode 100644 index c82c9f9d4c7..00000000000 --- a/e2e/vendor/github.com/containerd/ttrpc/unixcreds_linux.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "context" - "errors" - "fmt" - "net" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -type UnixCredentialsFunc func(*unix.Ucred) error - -func (fn UnixCredentialsFunc) Handshake(_ context.Context, conn net.Conn) (net.Conn, interface{}, error) { - uc, err := requireUnixSocket(conn) - if err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: require unix socket: %w", err) - } - - rs, err := uc.SyscallConn() - if err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed: %w", err) - } - var ( - ucred *unix.Ucred - ucredErr error - ) - if err := rs.Control(func(fd uintptr) { - ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED) - }); err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed: %w", err) - } - - if ucredErr != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials: %w", ucredErr) - } - - if err := fn(ucred); err != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: credential check failed: %w", err) - } - - return uc, ucred, nil -} - -// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID. -// -// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an -// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001. -// So calling this function with uid=0 allows a connection from effective UID 0 but rejects -// a connection from effective UID 1001. -// -// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)." -func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc { - return func(ucred *unix.Ucred) error { - return requireUidGid(ucred, uid, gid) - } -} - -func UnixSocketRequireRoot() UnixCredentialsFunc { - return UnixSocketRequireUidGid(0, 0) -} - -// UnixSocketRequireSameUser resolves the current effective unix user and returns a -// UnixCredentialsFunc that will validate incoming unix connections against the -// current credentials. -// -// This is useful when using abstract sockets that are accessible by all users. 
-func UnixSocketRequireSameUser() UnixCredentialsFunc { - euid, egid := os.Geteuid(), os.Getegid() - return UnixSocketRequireUidGid(euid, egid) -} - -func requireUidGid(ucred *unix.Ucred, uid, gid int) error { - if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) { - return fmt.Errorf("ttrpc: invalid credentials: %v", syscall.EPERM) - } - return nil -} - -func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) { - uc, ok := conn.(*net.UnixConn) - if !ok { - return nil, errors.New("a unix socket connection is required") - } - - return uc, nil -} diff --git a/e2e/vendor/github.com/containerd/typeurl/v2/.gitignore b/e2e/vendor/github.com/containerd/typeurl/v2/.gitignore deleted file mode 100644 index d53846778b6..00000000000 --- a/e2e/vendor/github.com/containerd/typeurl/v2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.test -coverage.txt diff --git a/e2e/vendor/github.com/containerd/typeurl/v2/LICENSE b/e2e/vendor/github.com/containerd/typeurl/v2/LICENSE deleted file mode 100644 index 584149b6ee2..00000000000 --- a/e2e/vendor/github.com/containerd/typeurl/v2/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/github.com/containerd/typeurl/v2/README.md b/e2e/vendor/github.com/containerd/typeurl/v2/README.md deleted file mode 100644 index 3098526ab1f..00000000000 --- a/e2e/vendor/github.com/containerd/typeurl/v2/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# typeurl - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) -[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) - -A Go package for managing the registration, marshaling, and unmarshaling of encoded types. - -This package helps when types are sent over a ttrpc/GRPC API and marshaled as a protobuf [Any](https://pkg.go.dev/google.golang.org/protobuf@v1.27.1/types/known/anypb#Any) - -## Project details - -**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). 
-As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. - -## Optional - -By default, support for gogoproto is available along side the standard Google -protobuf types. -You can choose to leave gogo support out by using the `!no_gogo` build tag. diff --git a/e2e/vendor/github.com/containerd/typeurl/v2/doc.go b/e2e/vendor/github.com/containerd/typeurl/v2/doc.go deleted file mode 100644 index c0d0fd20533..00000000000 --- a/e2e/vendor/github.com/containerd/typeurl/v2/doc.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -// Package typeurl assists with managing the registration, marshaling, and -// unmarshaling of types encoded as protobuf.Any. -// -// A protobuf.Any is a proto message that can contain any arbitrary data. It -// consists of two components, a TypeUrl and a Value, and its proto definition -// looks like this: -// -// message Any { -// string type_url = 1; -// bytes value = 2; -// } -// -// The TypeUrl is used to distinguish the contents from other proto.Any -// messages. This typeurl library manages these URLs to enable automagic -// marshaling and unmarshaling of the contents. -// -// For example, consider this go struct: -// -// type Foo struct { -// Field1 string -// Field2 string -// } -// -// To use typeurl, types must first be registered. This is typically done in -// the init function -// -// func init() { -// typeurl.Register(&Foo{}, "Foo") -// } -// -// This will register the type Foo with the url path "Foo". The arguments to -// Register are variadic, and are used to construct a url path. Consider this -// example, from the github.com/containerd/containerd/client package: -// -// func init() { -// const prefix = "types.containerd.io" -// // register TypeUrls for commonly marshaled external types -// major := strconv.Itoa(specs.VersionMajor) -// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") -// // this function has more Register calls, which are elided. -// } -// -// This registers several types under a more complex url, which ends up mapping -// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other -// value for major). -// -// Once a type is registered, it can be marshaled to a proto.Any message simply -// by calling `MarshalAny`, like this: -// -// foo := &Foo{Field1: "value1", Field2: "value2"} -// anyFoo, err := typeurl.MarshalAny(foo) -// -// MarshalAny will resolve the correct URL for the type. If the type in -// question implements the proto.Message interface, then it will be marshaled -// as a proto message. Otherwise, it will be marshaled as json. 
This means that -// typeurl will work on any arbitrary data, whether or not it has a proto -// definition, as long as it can be serialized to json. -// -// To unmarshal, the process is simply inverse: -// -// iface, err := typeurl.UnmarshalAny(anyFoo) -// foo := iface.(*Foo) -// -// The correct type is automatically chosen from the type registry, and the -// returned interface can be cast straight to that type. diff --git a/e2e/vendor/github.com/containerd/typeurl/v2/types.go b/e2e/vendor/github.com/containerd/typeurl/v2/types.go deleted file mode 100644 index efc405ddd48..00000000000 --- a/e2e/vendor/github.com/containerd/typeurl/v2/types.go +++ /dev/null @@ -1,307 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -import ( - "encoding/json" - "errors" - "fmt" - "path" - "reflect" - "sync" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/types/known/anypb" -) - -var ( - mu sync.RWMutex - registry = make(map[reflect.Type]string) - handlers []handler -) - -type handler interface { - Marshaller(interface{}) func() ([]byte, error) - Unmarshaller(interface{}) func([]byte) error - TypeURL(interface{}) string - GetType(url string) reflect.Type -} - -// Definitions of common error types used throughout typeurl. -// -// These error types are used with errors.Wrap and errors.Wrapf to add context -// to an error. -// -// To detect an error class, use errors.Is() functions to tell whether an -// error is of this type. - -var ( - ErrNotFound = errors.New("not found") -) - -// Any contains an arbitrary protcol buffer message along with its type. -// -// While there is google.golang.org/protobuf/types/known/anypb.Any, -// we'd like to have our own to hide the underlying protocol buffer -// implementations from containerd clients. -// -// https://developers.google.com/protocol-buffers/docs/proto3#any -type Any interface { - // GetTypeUrl returns a URL/resource name that uniquely identifies - // the type of the serialized protocol buffer message. - GetTypeUrl() string - - // GetValue returns a valid serialized protocol buffer of the type that - // GetTypeUrl() indicates. - GetValue() []byte -} - -type anyType struct { - typeURL string - value []byte -} - -func (a *anyType) GetTypeUrl() string { - if a == nil { - return "" - } - return a.typeURL -} - -func (a *anyType) GetValue() []byte { - if a == nil { - return nil - } - return a.value -} - -// Register a type with a base URL for JSON marshaling. When the MarshalAny and -// UnmarshalAny functions are called they will treat the Any type value as JSON. -// To use protocol buffers for handling the Any value the proto.Register -// function should be used instead of this function. -func Register(v interface{}, args ...string) { - var ( - t = tryDereference(v) - p = path.Join(args...) 
- ) - mu.Lock() - defer mu.Unlock() - if et, ok := registry[t]; ok { - if et != p { - panic(fmt.Errorf("type registered with alternate path %q != %q", et, p)) - } - return - } - registry[t] = p -} - -// TypeURL returns the type url for a registered type. -func TypeURL(v interface{}) (string, error) { - mu.RLock() - u, ok := registry[tryDereference(v)] - mu.RUnlock() - if !ok { - switch t := v.(type) { - case proto.Message: - return string(t.ProtoReflect().Descriptor().FullName()), nil - default: - for _, h := range handlers { - if u := h.TypeURL(v); u != "" { - return u, nil - } - } - return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound) - } - } - return u, nil -} - -// Is returns true if the type of the Any is the same as v. -func Is(any Any, v interface{}) bool { - if any == nil { - return false - } - // call to check that v is a pointer - tryDereference(v) - url, err := TypeURL(v) - if err != nil { - return false - } - return any.GetTypeUrl() == url -} - -// MarshalAny marshals the value v into an any with the correct TypeUrl. -// If the provided object is already a proto.Any message, then it will be -// returned verbatim. If it is of type proto.Message, it will be marshaled as a -// protocol buffer. Otherwise, the object will be marshaled to json. -func MarshalAny(v interface{}) (Any, error) { - var marshal func(v interface{}) ([]byte, error) - switch t := v.(type) { - case Any: - // avoid reserializing the type if we have an any. - return t, nil - case proto.Message: - marshal = func(v interface{}) ([]byte, error) { - return proto.Marshal(t) - } - default: - for _, h := range handlers { - if m := h.Marshaller(v); m != nil { - marshal = func(v interface{}) ([]byte, error) { - return m() - } - break - } - } - - if marshal == nil { - marshal = json.Marshal - } - } - - url, err := TypeURL(v) - if err != nil { - return nil, err - } - - data, err := marshal(v) - if err != nil { - return nil, err - } - return &anyType{ - typeURL: url, - value: data, - }, nil -} - -// UnmarshalAny unmarshals the any type into a concrete type. -func UnmarshalAny(any Any) (interface{}, error) { - return UnmarshalByTypeURL(any.GetTypeUrl(), any.GetValue()) -} - -// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type. -func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { - return unmarshal(typeURL, value, nil) -} - -// UnmarshalTo unmarshals the any type into a concrete type passed in the out -// argument. It is identical to UnmarshalAny, but lets clients provide a -// destination type through the out argument. -func UnmarshalTo(any Any, out interface{}) error { - return UnmarshalToByTypeURL(any.GetTypeUrl(), any.GetValue(), out) -} - -// UnmarshalToByTypeURL unmarshals the given type and value into a concrete type passed -// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients -// provide a destination type through the out argument. -func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { - _, err := unmarshal(typeURL, value, out) - return err -} - -// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any. -func MarshalProto(from Any) *anypb.Any { - if from == nil { - return nil - } - - if pbany, ok := from.(*anypb.Any); ok { - return pbany - } - - return &anypb.Any{ - TypeUrl: from.GetTypeUrl(), - Value: from.GetValue(), - } -} - -// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any. 
-func MarshalAnyToProto(from interface{}) (*anypb.Any, error) { - anyType, err := MarshalAny(from) - if err != nil { - return nil, err - } - return MarshalProto(anyType), nil -} - -func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) - if err != nil { - return nil, err - } - - if v == nil { - v = reflect.New(t).Interface() - } else { - // Validate interface type provided by client - vURL, err := TypeURL(v) - if err != nil { - return nil, err - } - if typeURL != vURL { - return nil, fmt.Errorf("can't unmarshal type %q to output %q", typeURL, vURL) - } - } - - pm, ok := v.(proto.Message) - if ok { - return v, proto.Unmarshal(value, pm) - } - - for _, h := range handlers { - if unmarshal := h.Unmarshaller(v); unmarshal != nil { - return v, unmarshal(value) - } - } - - // fallback to json unmarshaller - return v, json.Unmarshal(value, v) -} - -func getTypeByUrl(url string) (reflect.Type, error) { - mu.RLock() - for t, u := range registry { - if u == url { - mu.RUnlock() - return t, nil - } - } - mu.RUnlock() - mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) - if err != nil { - if errors.Is(err, protoregistry.NotFound) { - for _, h := range handlers { - if t := h.GetType(url); t != nil { - return t, nil - } - } - } - return nil, fmt.Errorf("type with url %s: %w", url, ErrNotFound) - } - empty := mt.New().Interface() - return reflect.TypeOf(empty).Elem(), nil -} - -func tryDereference(v interface{}) reflect.Type { - t := reflect.TypeOf(v) - if t.Kind() == reflect.Ptr { - // require check of pointer but dereference to register - return t.Elem() - } - panic("v is not a pointer to a type") -} diff --git a/e2e/vendor/github.com/containerd/typeurl/v2/types_gogo.go b/e2e/vendor/github.com/containerd/typeurl/v2/types_gogo.go deleted file mode 100644 index fa293323bef..00000000000 --- a/e2e/vendor/github.com/containerd/typeurl/v2/types_gogo.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build !no_gogo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package typeurl - -import ( - "reflect" - - gogoproto "github.com/gogo/protobuf/proto" -) - -func init() { - handlers = append(handlers, gogoHandler{}) -} - -type gogoHandler struct{} - -func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) { - pm, ok := v.(gogoproto.Message) - if !ok { - return nil - } - return func() ([]byte, error) { - return gogoproto.Marshal(pm) - } -} - -func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error { - pm, ok := v.(gogoproto.Message) - if !ok { - return nil - } - - return func(dt []byte) error { - return gogoproto.Unmarshal(dt, pm) - } -} - -func (gogoHandler) TypeURL(v interface{}) string { - pm, ok := v.(gogoproto.Message) - if !ok { - return "" - } - return gogoproto.MessageName(pm) -} - -func (gogoHandler) GetType(url string) reflect.Type { - t := gogoproto.MessageType(url) - if t == nil { - return nil - } - return t.Elem() -} diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/LICENSE b/e2e/vendor/github.com/coreos/go-systemd/v22/LICENSE deleted file mode 100644 index 37ec93a14fd..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. 
- -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/NOTICE b/e2e/vendor/github.com/coreos/go-systemd/v22/NOTICE deleted file mode 100644 index 23a0ada2fbb..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go deleted file mode 100644 index 147f756fe24..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ -package dbus - -import ( - "context" - "encoding/hex" - "fmt" - "os" - "strconv" - "strings" - "sync" - - "github.com/godbus/dbus/v5" -) - -const ( - alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` - num = `0123456789` - alphanum = alpha + num - signalBuffer = 100 -) - -// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped -func needsEscape(i int, b byte) bool { - // Escape everything that is not a-z-A-Z-0-9 - // Also escape 0-9 if it's the first character - return strings.IndexByte(alphanum, b) == -1 || - (i == 0 && strings.IndexByte(num, b) != -1) -} - -// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the -// rules that systemd uses for serializing special characters. -func PathBusEscape(path string) string { - // Special case the empty string - if len(path) == 0 { - return "_" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if needsEscape(i, c) { - e := fmt.Sprintf("_%x", c) - n = append(n, []byte(e)...) - } else { - n = append(n, c) - } - } - return string(n) -} - -// pathBusUnescape is the inverse of PathBusEscape. -func pathBusUnescape(path string) string { - if path == "_" { - return "" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if c == '_' && i+2 < len(path) { - res, err := hex.DecodeString(path[i+1 : i+3]) - if err == nil { - n = append(n, res...) - } - i += 2 - } else { - n = append(n, c) - } - } - return string(n) -} - -// Conn is a connection to systemd's dbus endpoint. -type Conn struct { - // sysconn/sysobj are only used to call dbus methods - sysconn *dbus.Conn - sysobj dbus.BusObject - - // sigconn/sigobj are only used to receive dbus signals - sigconn *dbus.Conn - sigobj dbus.BusObject - - jobListener struct { - jobs map[dbus.ObjectPath]chan<- string - sync.Mutex - } - subStateSubscriber struct { - updateCh chan<- *SubStateUpdate - errCh chan<- error - sync.Mutex - ignore map[dbus.ObjectPath]int64 - cleanIgnore int64 - } - propertiesSubscriber struct { - updateCh chan<- *PropertiesUpdate - errCh chan<- error - sync.Mutex - } -} - -// Deprecated: use NewWithContext instead. -func New() (*Conn, error) { - return NewWithContext(context.Background()) -} - -// NewWithContext establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. -func NewWithContext(ctx context.Context) (*Conn, error) { - conn, err := NewSystemConnectionContext(ctx) - if err != nil && os.Geteuid() == 0 { - return NewSystemdConnectionContext(ctx) - } - return conn, err -} - -// Deprecated: use NewSystemConnectionContext instead. -func NewSystemConnection() (*Conn, error) { - return NewSystemConnectionContext(context.Background()) -} - -// NewSystemConnectionContext establishes a connection to the system bus and authenticates. -// Callers should call Close() when done with the connection. 
-func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) - }) -} - -// Deprecated: use NewUserConnectionContext instead. -func NewUserConnection() (*Conn, error) { - return NewUserConnectionContext(context.Background()) -} - -// NewUserConnectionContext establishes a connection to the session bus and -// authenticates. This can be used to connect to systemd user instances. -// Callers should call Close() when done with the connection. -func NewUserConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) - }) -} - -// Deprecated: use NewSystemdConnectionContext instead. -func NewSystemdConnection() (*Conn, error) { - return NewSystemdConnectionContext(context.Background()) -} - -// NewSystemdConnectionContext establishes a private, direct connection to systemd. -// This can be used for communicating with systemd without a dbus daemon. -// Callers should call Close() when done with the connection. -func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - // We skip Hello when talking directly to systemd. - return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { - return dbus.Dial("unix:path=/run/systemd/private", opts...) - }) - }) -} - -// Close closes an established connection. -func (c *Conn) Close() { - c.sysconn.Close() - c.sigconn.Close() -} - -// Connected returns whether conn is connected -func (c *Conn) Connected() bool { - return c.sysconn.Connected() && c.sigconn.Connected() -} - -// NewConnection establishes a connection to a bus using a caller-supplied function. -// This allows connecting to remote buses through a user-supplied mechanism. -// The supplied function may be called multiple times, and should return independent connections. -// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, -// and any authentication should be handled by the function. -func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { - sysconn, err := dialBus() - if err != nil { - return nil, err - } - - sigconn, err := dialBus() - if err != nil { - sysconn.Close() - return nil, err - } - - c := &Conn{ - sysconn: sysconn, - sysobj: systemdObject(sysconn), - sigconn: sigconn, - sigobj: systemdObject(sigconn), - } - - c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) - c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) - - // Setup the listeners on jobs so that we can get completions - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - - c.dispatch() - return c, nil -} - -// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager -// interface. The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html. -func (c *Conn) GetManagerProperty(prop string) (string, error) { - variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) - if err != nil { - return "", err - } - return variant.String(), nil -} - -func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus(dbus.WithContext(ctx)) - if err != nil { - return nil, err - } - - // Only use EXTERNAL method, and hardcode the uid (not username) - // to avoid a username lookup (which requires a dynamically linked - // libc) - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := dbusAuthConnection(ctx, createBus) - if err != nil { - return nil, err - } - - if err = conn.Hello(); err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func systemdObject(conn *dbus.Conn) dbus.BusObject { - return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) -} diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go deleted file mode 100644 index 074148cb4d6..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ /dev/null @@ -1,864 +0,0 @@ -// Copyright 2015, 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "context" - "errors" - "fmt" - "path" - "strconv" - - "github.com/godbus/dbus/v5" -) - -// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API -type Who string - -const ( - // All sends the signal to all processes in the unit - All Who = "all" - // Main sends the signal to the main process of the unit - Main Who = "main" - // Control sends the signal to the control process of the unit - Control Who = "control" -) - -func (c *Conn) jobComplete(signal *dbus.Signal) { - var id uint32 - var job dbus.ObjectPath - var unit string - var result string - dbus.Store(signal.Body, &id, &job, &unit, &result) - c.jobListener.Lock() - out, ok := c.jobListener.jobs[job] - if ok { - out <- result - delete(c.jobListener.jobs, job) - } - c.jobListener.Unlock() -} - -func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { - if ch != nil { - c.jobListener.Lock() - defer c.jobListener.Unlock() - } - - var p dbus.ObjectPath - err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) - if err != nil { - return 0, err - } - - if ch != nil { - c.jobListener.jobs[p] = ch - } - - // ignore error since 0 is fine if conversion fails - jobID, _ := strconv.Atoi(path.Base(string(p))) - - return jobID, nil -} - -// Deprecated: use StartUnitContext instead. 
-func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.StartUnitContext(context.Background(), name, mode, ch) -} - -// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise -// specified by the mode string). -// -// Takes the unit to activate, plus a mode string. The mode needs to be one of -// replace, fail, isolate, ignore-dependencies, ignore-requirements. If -// "replace" the call will start the unit and its dependencies, possibly -// replacing already queued jobs that conflict with this. If "fail" the call -// will start the unit and its dependencies, but will fail if this would change -// an already queued job. If "isolate" the call will start the unit in question -// and terminate all units that aren't dependencies of it. If -// "ignore-dependencies" it will start a unit but ignore all its dependencies. -// If "ignore-requirements" it will start a unit but only ignore the -// requirement dependencies. It is not recommended to make use of the latter -// two options. -// -// If the provided channel is non-nil, a result string will be sent to it upon -// job completion: one of done, canceled, timeout, failed, dependency, skipped. -// done indicates successful execution of a job. canceled indicates that a job -// has been canceled before it finished execution. timeout indicates that the -// job timeout was reached. failed indicates that the job failed. dependency -// indicates that a job this job has been depending on failed and the job hence -// has been removed too. skipped indicates that a job was skipped because it -// didn't apply to the units current state. -// -// If no error occurs, the ID of the underlying systemd job will be returned. There -// does exist the possibility for no error to be returned, but for the returned job -// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint -// should not be considered authoritative. -// -// If an error does occur, it will be returned to the user alongside a job ID of 0. -func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) -} - -// Deprecated: use StopUnitContext instead. -func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { - return c.StopUnitContext(context.Background(), name, mode, ch) -} - -// StopUnitContext is similar to StartUnitContext, but stops the specified unit -// rather than starting it. -func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) -} - -// Deprecated: use ReloadUnitContext instead. -func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { - return c.ReloadUnitContext(context.Background(), name, mode, ch) -} - -// ReloadUnitContext reloads a unit. Reloading is done only if the unit -// is already running, and fails otherwise. -func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) -} - -// Deprecated: use RestartUnitContext instead. -func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.RestartUnitContext(context.Background(), name, mode, ch) -} - -// RestartUnitContext restarts a service. 
If a service is restarted that isn't -// running it will be started. -func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) -} - -// Deprecated: use TryRestartUnitContext instead. -func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.TryRestartUnitContext(context.Background(), name, mode, ch) -} - -// TryRestartUnitContext is like RestartUnitContext, except that a service that -// isn't running is not affected by the restart. -func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) -} - -// Deprecated: use ReloadOrRestartUnitContext instead. -func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch) -} - -// ReloadOrRestartUnitContext attempts a reload if the unit supports it and uses -// a restart otherwise. -func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) -} - -// Deprecated: use ReloadOrTryRestartUnitContext instead. -func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch) -} - -// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it, -// and uses a "Try" flavored restart otherwise. -func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) -} - -// Deprecated: use StartTransientUnitContext instead. -func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) -} - -// StartTransientUnitContext may be used to create and start a transient unit, which -// will be released as soon as it is not running or referenced anymore or the -// system is rebooted. name is the unit name including suffix, and must be -// unique. mode is the same as in StartUnitContext, properties contains properties -// of the unit. -func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) -} - -// Deprecated: use KillUnitContext instead. -func (c *Conn) KillUnit(name string, signal int32) { - c.KillUnitContext(context.Background(), name, signal) -} - -// KillUnitContext takes the unit name and a UNIX signal number to send. -// All of the unit's processes are killed. -func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { - c.KillUnitWithTarget(ctx, name, All, signal) -} - -// KillUnitWithTarget is like KillUnitContext, but allows you to specify which -// process in the unit to send the signal to.
-func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() -} - -// Deprecated: use ResetFailedUnitContext instead. -func (c *Conn) ResetFailedUnit(name string) error { - return c.ResetFailedUnitContext(context.Background(), name) -} - -// ResetFailedUnitContext resets the "failed" state of a specific unit. -func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() -} - -// Deprecated: use SystemStateContext instead. -func (c *Conn) SystemState() (*Property, error) { - return c.SystemStateContext(context.Background()) -} - -// SystemStateContext returns the systemd state. Equivalent to -// systemctl is-system-running. -func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { - var err error - var prop dbus.Variant - - obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: "SystemState", Value: prop}, nil -} - -// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. -func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { - var err error - var props map[string]dbus.Variant - - if !path.IsValid() { - return nil, fmt.Errorf("invalid unit name: %v", path) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) - if err != nil { - return nil, err - } - - out := make(map[string]interface{}, len(props)) - for k, v := range props { - out[k] = v.Value() - } - - return out, nil -} - -// Deprecated: use GetUnitPropertiesContext instead. -func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { - return c.GetUnitPropertiesContext(context.Background(), unit) -} - -// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of -// its dbus object properties. -func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") -} - -// Deprecated: use GetUnitPathPropertiesContext instead. -func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { - return c.GetUnitPathPropertiesContext(context.Background(), path) -} - -// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all -// of its dbus object properties. -func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { - return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") -} - -// Deprecated: use GetAllPropertiesContext instead. -func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { - return c.GetAllPropertiesContext(context.Background(), unit) -} - -// GetAllPropertiesContext takes the (unescaped) unit name and returns all of -// its dbus object properties. 
-func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(ctx, path, "") -} - -func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) { - var err error - var prop dbus.Variant - - path := unitPath(unit) - if !path.IsValid() { - return nil, errors.New("invalid unit name: " + unit) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: propertyName, Value: prop}, nil -} - -// Deprecated: use GetUnitPropertyContext instead. -func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { - return c.GetUnitPropertyContext(context.Background(), unit, propertyName) -} - -// GetUnitPropertyContext takes an (unescaped) unit name, and a property name, -// and returns the property value. -func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) { - return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName) -} - -// Deprecated: use GetServicePropertyContext instead. -func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { - return c.GetServicePropertyContext(context.Background(), service, propertyName) -} - -// GetServiceProperty returns property for given service name and property name. -func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { - return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) -} - -// Deprecated: use GetUnitTypePropertiesContext instead. -func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { - return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) -} - -// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type. -// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. -// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. -func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) -} - -// Deprecated: use SetUnitPropertiesContext instead. -func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) -} - -// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. -// Not all properties may be changed at runtime, but many resource management -// settings (primarily those in systemd.cgroup(5)) may. The changes are applied -// instantly, and stored on disk for future boots, unless runtime is true, in which -// case the settings only apply until the next reboot. name is the name of the unit -// to modify. properties are the settings to set, encoded as an array of property -// name and value pairs. 
-func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() -} - -// Deprecated: use GetUnitTypePropertyContext instead. -func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) -} - -// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, -// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. -func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) -} - -type UnitStatus struct { - Name string // The primary unit name as string - Description string // The human readable description string - LoadState string // The load state (i.e. whether the unit file has been loaded successfully) - ActiveState string // The active state (i.e. whether the unit is currently started or not) - SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) - Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. - Path dbus.ObjectPath // The unit object path - JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise - JobType string // The job type as string - JobPath dbus.ObjectPath // The job object path -} - -type storeFunc func(retvalues ...interface{}) error - -func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]UnitStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - err = dbus.Store(resultInterface, statusInterface...) - if err != nil { - return nil, err - } - - return status, nil -} - -// GetUnitByPID returns the unit object path of the unit a process ID -// belongs to. It takes a UNIX PID and returns the object path. The PID must -// refer to an existing system process. -func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) { - var result dbus.ObjectPath - - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result) - - return result, err -} - -// GetUnitNameByPID returns the name of the unit a process ID belongs to. It -// takes a UNIX PID and returns the unit name. The PID must refer to an -// existing system process. -func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) { - path, err := c.GetUnitByPID(ctx, pid) - if err != nil { - return "", err - } - - return unitName(path), nil -} - -// Deprecated: use ListUnitsContext instead. -func (c *Conn) ListUnits() ([]UnitStatus, error) { - return c.ListUnitsContext(context.Background()) -} - -// ListUnitsContext returns an array with all currently loaded units.
Note that -// units may be known by multiple names at the same time, and hence there might -// be more unit names loaded than actual units behind them. -// Also note that a unit is only loaded if it is active and/or enabled. -// Units that are both disabled and inactive will thus not be returned. -func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) -} - -// Deprecated: use ListUnitsFilteredContext instead. -func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { - return c.ListUnitsFilteredContext(context.Background(), states) -} - -// ListUnitsFilteredContext returns an array with units filtered by state. -// It takes a list of units' statuses to filter. -func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) -} - -// Deprecated: use ListUnitsByPatternsContext instead. -func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { - return c.ListUnitsByPatternsContext(context.Background(), states, patterns) -} - -// ListUnitsByPatternsContext returns an array with units. -// It takes a list of units' statuses and names to filter. -// Note that units may be known by multiple names at the same time, -// and hence there might be more unit names loaded than actual units behind them. -func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) -} - -// Deprecated: use ListUnitsByNamesContext instead. -func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { - return c.ListUnitsByNamesContext(context.Background(), units) -} - -// ListUnitsByNamesContext returns an array with units. It takes a list of units' -// names and returns a UnitStatus array. Compared to the ListUnitsByPatternsContext -// method, this method returns statuses even for inactive or non-existing -// units. The input array should contain exact unit names, not patterns. -// -// Requires systemd v230 or higher. -func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) -} - -type UnitFile struct { - Path string - Type string -} - -func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - files := make([]UnitFile, len(result)) - fileInterface := make([]interface{}, len(files)) - for i := range files { - fileInterface[i] = &files[i] - } - - err = dbus.Store(resultInterface, fileInterface...) - if err != nil { - return nil, err - } - - return files, nil -} - -// Deprecated: use ListUnitFilesContext instead. -func (c *Conn) ListUnitFiles() ([]UnitFile, error) { - return c.ListUnitFilesContext(context.Background()) -} - -// ListUnitFilesContext returns an array of all available unit files on disk.
-func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) -} - -// Deprecated: use ListUnitFilesByPatternsContext instead. -func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { - return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns) -} - -// ListUnitFilesByPatternsContext returns an array of all available units on disk matched the patterns. -func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) -} - -type LinkUnitFileChange EnableUnitFileChange - -// Deprecated: use LinkUnitFilesContext instead. -func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - return c.LinkUnitFilesContext(context.Background(), files, runtime, force) -} - -// LinkUnitFilesContext links unit files (that are located outside of the -// usual unit search paths) into the unit search path. -// -// It takes a list of absolute paths to unit files to link and two -// booleans. -// -// The first boolean controls whether the unit shall be -// enabled for runtime only (true, /run), or persistently (false, -// /etc). -// -// The second controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns a list of the changes made. The list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]LinkUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -// Deprecated: use EnableUnitFilesContext instead. -func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - return c.EnableUnitFilesContext(context.Background(), files, runtime, force) -} - -// EnableUnitFilesContext may be used to enable one or more units in the system -// (by creating symlinks to them in /etc or /run). -// -// It takes a list of unit files to enable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and two booleans: the first controls whether the unit shall -// be enabled for runtime only (true, /run), or persistently (false, /etc). -// The second one controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns one boolean and an array with the changes made. The -// boolean signals whether the unit files contained any enablement -// information (i.e. 
an [Install] section). The changes list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - var carries_install_info bool - - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) - if err != nil { - return false, nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return false, nil, err - } - - return carries_install_info, changes, nil -} - -type EnableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use DisableUnitFilesContext instead. -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { - return c.DisableUnitFilesContext(context.Background(), files, runtime) -} - -// DisableUnitFilesContext may be used to disable one or more units in the -// system (by removing symlinks to them from /etc or /run). -// -// It takes a list of unit files to disable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and one boolean: whether the unit was enabled for runtime -// only (true, /run), or persistently (false, /etc). -// -// This call returns an array with the changes made. The changes list -// consists of structures with three strings: the type of the change (one of -// symlink or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type DisableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use MaskUnitFilesContext instead. -func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - return c.MaskUnitFilesContext(context.Background(), files, runtime, force) -} - -// MaskUnitFilesContext masks one or more units in the system.
-// -// The files argument contains a list of units to mask (either just file names -// or full absolute paths if the unit files are residing outside the usual unit -// search paths). -// -// The runtime argument is used to specify whether the unit was enabled for -// runtime only (true, /run/systemd/..), or persistently (false, -// /etc/systemd/..). -func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]MaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type MaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use UnmaskUnitFilesContext instead. -func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - return c.UnmaskUnitFilesContext(context.Background(), files, runtime) -} - -// UnmaskUnitFilesContext unmasks one or more units in the system. -// -// It takes the list of unit files to mask (either just file names or full -// absolute paths if the unit files are residing outside the usual unit search -// paths), and a boolean runtime flag to specify whether the unit was enabled -// for runtime only (true, /run/systemd/..), or persistently (false, -// /etc/systemd/..). -func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]UnmaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type UnmaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use ReloadContext instead. -func (c *Conn) Reload() error { - return c.ReloadContext(context.Background()) -} - -// ReloadContext instructs systemd to scan for and reload unit files. This is -// an equivalent to systemctl daemon-reload. -func (c *Conn) ReloadContext(ctx context.Context) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store() -} - -func unitPath(name string) dbus.ObjectPath { - return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) -} - -// unitName returns the unescaped base element of the supplied escaped path. 
-func unitName(dpath dbus.ObjectPath) string { - return pathBusUnescape(path.Base(string(dpath))) -} - -// JobStatus holds a currently queued job definition. -type JobStatus struct { - Id uint32 // The numeric job id - Unit string // The primary unit name for this job - JobType string // The job type as string - Status string // The job state as string - JobPath dbus.ObjectPath // The job object path - UnitPath dbus.ObjectPath // The unit object path -} - -// Deprecated: use ListJobsContext instead. -func (c *Conn) ListJobs() ([]JobStatus, error) { - return c.ListJobsContext(context.Background()) -} - -// ListJobsContext returns an array with all currently queued jobs. -func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { - return c.listJobsInternal(ctx) -} - -func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { - result := make([][]interface{}, 0) - if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]JobStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - if err := dbus.Store(resultInterface, statusInterface...); err != nil { - return nil, err - } - - return status, nil -} - -// Freeze the cgroup associated with the unit. -// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2. -func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() -} - -// Unfreeze the cgroup associated with the unit. -func (c *Conn) ThawUnit(ctx context.Context, unit string) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() -} diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go deleted file mode 100644 index fb42b627338..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "github.com/godbus/dbus/v5" -) - -// From the systemd docs: -// -// The properties array of StartTransientUnit() may take many of the settings -// that may also be configured in unit files. Not all parameters are currently -// accepted though, but we plan to cover more properties with future release. 
-// Currently you may set the Description, Slice and all dependency types of -// units, as well as RemainAfterExit, ExecStart for service units, -// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, -// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, -// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, -// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map -// directly to their counterparts in unit files and as normal D-Bus object -// properties. The exception here is the PIDs field of scope units which is -// used for construction of the scope only and specifies the initial PIDs to -// add to the scope object. - -type Property struct { - Name string - Value dbus.Variant -} - -type PropertyCollection struct { - Name string - Properties []Property -} - -type execStart struct { - Path string // the binary path to execute - Args []string // an array with all arguments to pass to the executed command, starting with argument 0 - UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly -} - -// PropExecStart sets the ExecStart service property. The first argument is a -// slice with the binary path to execute followed by the arguments to pass to -// the executed command. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= -func PropExecStart(command []string, uncleanIsFailure bool) Property { - execStarts := []execStart{ - { - Path: command[0], - Args: command, - UncleanIsFailure: uncleanIsFailure, - }, - } - - return Property{ - Name: "ExecStart", - Value: dbus.MakeVariant(execStarts), - } -} - -// PropRemainAfterExit sets the RemainAfterExit service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= -func PropRemainAfterExit(b bool) Property { - return Property{ - Name: "RemainAfterExit", - Value: dbus.MakeVariant(b), - } -} - -// PropType sets the Type service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= -func PropType(t string) Property { - return Property{ - Name: "Type", - Value: dbus.MakeVariant(t), - } -} - -// PropDescription sets the Description unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= -func PropDescription(desc string) Property { - return Property{ - Name: "Description", - Value: dbus.MakeVariant(desc), - } -} - -func propDependency(name string, units []string) Property { - return Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} - -// PropRequires sets the Requires unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= -func PropRequires(units ...string) Property { - return propDependency("Requires", units) -} - -// PropRequiresOverridable sets the RequiresOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= -func PropRequiresOverridable(units ...string) Property { - return propDependency("RequiresOverridable", units) -} - -// PropRequisite sets the Requisite unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= -func PropRequisite(units ...string) Property { - return propDependency("Requisite", units) -} - -// PropRequisiteOverridable sets the RequisiteOverridable unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= -func PropRequisiteOverridable(units ...string) Property { - return propDependency("RequisiteOverridable", units) -} - -// PropWants sets the Wants unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= -func PropWants(units ...string) Property { - return propDependency("Wants", units) -} - -// PropBindsTo sets the BindsTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= -func PropBindsTo(units ...string) Property { - return propDependency("BindsTo", units) -} - -// PropRequiredBy sets the RequiredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= -func PropRequiredBy(units ...string) Property { - return propDependency("RequiredBy", units) -} - -// PropRequiredByOverridable sets the RequiredByOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= -func PropRequiredByOverridable(units ...string) Property { - return propDependency("RequiredByOverridable", units) -} - -// PropWantedBy sets the WantedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= -func PropWantedBy(units ...string) Property { - return propDependency("WantedBy", units) -} - -// PropBoundBy sets the BoundBy unit property. See -// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= -func PropBoundBy(units ...string) Property { - return propDependency("BoundBy", units) -} - -// PropConflicts sets the Conflicts unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= -func PropConflicts(units ...string) Property { - return propDependency("Conflicts", units) -} - -// PropConflictedBy sets the ConflictedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= -func PropConflictedBy(units ...string) Property { - return propDependency("ConflictedBy", units) -} - -// PropBefore sets the Before unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= -func PropBefore(units ...string) Property { - return propDependency("Before", units) -} - -// PropAfter sets the After unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= -func PropAfter(units ...string) Property { - return propDependency("After", units) -} - -// PropOnFailure sets the OnFailure unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= -func PropOnFailure(units ...string) Property { - return propDependency("OnFailure", units) -} - -// PropTriggers sets the Triggers unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= -func PropTriggers(units ...string) Property { - return propDependency("Triggers", units) -} - -// PropTriggeredBy sets the TriggeredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= -func PropTriggeredBy(units ...string) Property { - return propDependency("TriggeredBy", units) -} - -// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= -func PropPropagatesReloadTo(units ...string) Property { - return propDependency("PropagatesReloadTo", units) -} - -// PropRequiresMountsFor sets the RequiresMountsFor unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= -func PropRequiresMountsFor(units ...string) Property { - return propDependency("RequiresMountsFor", units) -} - -// PropSlice sets the Slice unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= -func PropSlice(slice string) Property { - return Property{ - Name: "Slice", - Value: dbus.MakeVariant(slice), - } -} - -// PropPids sets the PIDs field of scope units used in the initial construction -// of the scope only and specifies the initial PIDs to add to the scope object. -// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties -func PropPids(pids ...uint32) Property { - return Property{ - Name: "PIDs", - Value: dbus.MakeVariant(pids), - } -} diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/set.go deleted file mode 100644 index 17c5d485657..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/set.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -type set struct { - data map[string]bool -} - -func (s *set) Add(value string) { - s.data[value] = true -} - -func (s *set) Remove(value string) { - delete(s.data, value) -} - -func (s *set) Contains(value string) (exists bool) { - _, exists = s.data[value] - return -} - -func (s *set) Length() int { - return len(s.data) -} - -func (s *set) Values() (values []string) { - for val := range s.data { - values = append(values, val) - } - return -} - -func newSet() *set { - return &set{make(map[string]bool)} -} diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go deleted file mode 100644 index 7e370fea212..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbus - -import ( - "errors" - "log" - "time" - - "github.com/godbus/dbus/v5" -) - -const ( - cleanIgnoreInterval = int64(10 * time.Second) - ignoreInterval = int64(30 * time.Millisecond) -) - -// Subscribe sets up this connection to subscribe to all systemd dbus events. -// This is required before calling SubscribeUnits. When the connection closes -// systemd will automatically stop sending signals so there is no need to -// explicitly call Unsubscribe(). -func (c *Conn) Subscribe() error { - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() -} - -// Unsubscribe this connection from systemd dbus events. -func (c *Conn) Unsubscribe() error { - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() -} - -func (c *Conn) dispatch() { - ch := make(chan *dbus.Signal, signalBuffer) - - c.sigconn.Signal(ch) - - go func() { - for { - signal, ok := <-ch - if !ok { - return - } - - if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { - c.jobComplete(signal) - } - - if c.subStateSubscriber.updateCh == nil && - c.propertiesSubscriber.updateCh == nil { - continue - } - - var unitPath dbus.ObjectPath - switch signal.Name { - case "org.freedesktop.systemd1.Manager.JobRemoved": - unitName := signal.Body[2].(string) - c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - case "org.freedesktop.systemd1.Manager.UnitNew": - unitPath = signal.Body[1].(dbus.ObjectPath) - case "org.freedesktop.DBus.Properties.PropertiesChanged": - if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - unitPath = signal.Path - - if len(signal.Body) >= 2 { - if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { - c.sendPropertiesUpdate(unitPath, changed) - } - } - } - } - - if unitPath == dbus.ObjectPath("") { - continue - } - - c.sendSubStateUpdate(unitPath) - } - }() -} - -// SubscribeUnits returns two unbuffered channels which will receive all changed units every -// interval. Deleted units are sent as nil. -func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { - return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) -} - -// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer -// size of the channels, the comparison function for detecting changes and a filter -// function for cutting down on the noise that your channel receives. 
-func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { - old := make(map[string]*UnitStatus) - statusChan := make(chan map[string]*UnitStatus, buffer) - errChan := make(chan error, buffer) - - go func() { - for { - timerChan := time.After(interval) - - units, err := c.ListUnits() - if err == nil { - cur := make(map[string]*UnitStatus) - for i := range units { - if filterUnit != nil && filterUnit(units[i].Name) { - continue - } - cur[units[i].Name] = &units[i] - } - - // add all new or changed units - changed := make(map[string]*UnitStatus) - for n, u := range cur { - if oldU, ok := old[n]; !ok || isChanged(oldU, u) { - changed[n] = u - } - delete(old, n) - } - - // add all deleted units - for oldN := range old { - changed[oldN] = nil - } - - old = cur - - if len(changed) != 0 { - statusChan <- changed - } - } else { - errChan <- err - } - - <-timerChan - } - }() - - return statusChan, errChan -} - -type SubStateUpdate struct { - UnitName string - SubState string -} - -// SetSubStateSubscriber writes to updateCh when any unit's substate changes. -// Although this writes to updateCh on every state change, the reported state -// may be more recent than the change that generated it (due to an unavoidable -// race in the systemd dbus interface). That is, this method provides a good -// way to keep a current view of all units' states, but is not guaranteed to -// show every state transition they go through. Furthermore, state changes -// will only be written to the channel with non-blocking writes. If updateCh -// is full, it attempts to write an error to errCh; if errCh is full, the error -// passes silently. -func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { - if c == nil { - msg := "nil receiver" - select { - case errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - c.subStateSubscriber.updateCh = updateCh - c.subStateSubscriber.errCh = errCh -} - -func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - - if c.subStateSubscriber.updateCh == nil { - return - } - - isIgnored := c.shouldIgnore(unitPath) - defer c.cleanIgnore() - if isIgnored { - return - } - - info, err := c.GetUnitPathProperties(unitPath) - if err != nil { - select { - case c.subStateSubscriber.errCh <- err: - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - defer c.updateIgnore(unitPath, info) - - name, ok := info["Id"].(string) - if !ok { - msg := "failed to cast info.Id" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - substate, ok := info["SubState"].(string) - if !ok { - msg := "failed to cast info.SubState" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - update := &SubStateUpdate{name, substate} - select { - case c.subStateSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} - -// The ignore functions 
work around a wart in the systemd dbus interface. -// Requesting the properties of an unloaded unit will cause systemd to send a -// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's -// properties on UnitNew (as that's the only indication of a new unit coming up -// for the first time), we would enter an infinite loop if we did not attempt -// to detect and ignore these spurious signals. The signal themselves are -// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an -// unloaded unit's signals for a short time after requesting its properties. -// This means that we will miss e.g. a transient unit being restarted -// *immediately* upon failure and also a transient unit being started -// immediately after requesting its status (with systemctl status, for example, -// because this causes a UnitNew signal to be sent which then causes us to fetch -// the properties). - -func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { - t, ok := c.subStateSubscriber.ignore[path] - return ok && t >= time.Now().UnixNano() -} - -func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { - loadState, ok := info["LoadState"].(string) - if !ok { - return - } - - // unit is unloaded - it will trigger bad systemd dbus behavior - if loadState == "not-found" { - c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval - } -} - -// without this, ignore would grow unboundedly over time -func (c *Conn) cleanIgnore() { - now := time.Now().UnixNano() - if c.subStateSubscriber.cleanIgnore < now { - c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval - - for p, t := range c.subStateSubscriber.ignore { - if t < now { - delete(c.subStateSubscriber.ignore, p) - } - } - } -} - -// PropertiesUpdate holds a map of a unit's changed properties -type PropertiesUpdate struct { - UnitName string - Changed map[string]dbus.Variant -} - -// SetPropertiesSubscriber writes to updateCh when any unit's properties -// change. Every property change reported by systemd will be sent; that is, no -// transitions will be "missed" (as they might be with SetSubStateSubscriber). -// However, state changes will only be written to the channel with non-blocking -// writes. If updateCh is full, it attempts to write an error to errCh; if -// errCh is full, the error passes silently. 
-func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - c.propertiesSubscriber.updateCh = updateCh - c.propertiesSubscriber.errCh = errCh -} - -// we don't need to worry about shouldIgnore() here because -// sendPropertiesUpdate doesn't call GetProperties() -func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - - if c.propertiesSubscriber.updateCh == nil { - return - } - - update := &PropertiesUpdate{unitName(unitPath), changedProps} - - select { - case c.propertiesSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.propertiesSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} diff --git a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go deleted file mode 100644 index 5b408d5847a..00000000000 --- a/e2e/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "time" -) - -// SubscriptionSet returns a subscription set which is like conn.Subscribe but -// can filter to only return events for a set of units. -type SubscriptionSet struct { - *set - conn *Conn -} - -func (s *SubscriptionSet) filter(unit string) bool { - return !s.Contains(unit) -} - -// Subscribe starts listening for dbus events for all of the units in the set. -// Returns channels identical to conn.SubscribeUnits. -func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { - // TODO: Make fully evented by using systemd 209 with properties changed values - return s.conn.SubscribeUnitsCustom(time.Second, 0, - mismatchUnitStatus, - func(unit string) bool { return s.filter(unit) }, - ) -} - -// NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { - return &SubscriptionSet{newSet(), conn} -} - -// mismatchUnitStatus returns true if the provided UnitStatus objects -// are not equivalent. false is returned if the objects are equivalent. -// Only the Name, Description and state-related fields are used in -// the comparison. 
-func mismatchUnitStatus(u1, u2 *UnitStatus) bool { - return u1.Name != u2.Name || - u1.Description != u2.Description || - u1.LoadState != u2.LoadState || - u1.ActiveState != u2.ActiveState || - u1.SubState != u2.SubState -} diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/e2e/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md deleted file mode 100644 index ca0e3c62c76..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ /dev/null @@ -1,256 +0,0 @@ -# Changelog # -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](http://keepachangelog.com/) -and this project adheres to [Semantic Versioning](http://semver.org/). - -## [Unreleased] ## - -## [0.4.1] - 2025-01-28 ## - -### Fixed ### -- The restrictions added for `root` paths passed to `SecureJoin` in 0.4.0 was - found to be too strict and caused some regressions when folks tried to - update, so this restriction has been relaxed to only return an error if the - path contains a `..` component. We still recommend users use `filepath.Clean` - (and even `filepath.EvalSymlinks`) on the `root` path they are using, but at - least you will no longer be punished for "trivial" unclean paths. - -## [0.4.0] - 2025-01-13 ## - -### Breaking #### -- `SecureJoin(VFS)` will now return an error if the provided `root` is not a - `filepath.Clean`'d path. - - While it is ultimately the responsibility of the caller to ensure the root is - a safe path to use, passing a path like `/symlink/..` as a root would result - in the `SecureJoin`'d path being placed in `/` even though `/symlink/..` - might be a different directory, and so we should more strongly discourage - such usage. - - All major users of `securejoin.SecureJoin` already ensure that the paths they - provide are safe (and this is ultimately a question of user error), but - removing this foot-gun is probably a good idea. Of course, this is - necessarily a breaking API change (though we expect no real users to be - affected by it). - - Thanks to [Erik Sjölund](https://github.com/eriksjolund), who initially - reported this issue as a possible security issue. - -- `MkdirAll` and `MkdirHandle` now take an `os.FileMode`-style mode argument - instead of a raw `unix.S_*`-style mode argument, which may cause compile-time - type errors depending on how you use `filepath-securejoin`. For most users, - there will be no change in behaviour aside from the type change (as the - bottom `0o777` bits are the same in both formats, and most users are probably - only using those bits). - - However, if you were using `unix.S_ISVTX` to set the sticky bit with - `MkdirAll(Handle)` you will need to switch to `os.ModeSticky` otherwise you - will get a runtime error with this update. In addition, the error message you - will get from passing `unix.S_ISUID` and `unix.S_ISGID` will be different as - they are treated as invalid bits now (note that previously passing said bits - was also an error). - -## [0.3.6] - 2024-12-17 ## - -### Compatibility ### -- The minimum Go version requirement for `filepath-securejoin` is now Go 1.18 - (we use generics internally). - - For reference, `filepath-securejoin@v0.3.0` somewhat-arbitrarily bumped the - Go version requirement to 1.21. 
- - While we did make some use of Go 1.21 stdlib features (and in principle Go - versions <= 1.21 are no longer even supported by upstream anymore), some - downstreams have complained that the version bump has meant that they have to - do workarounds when backporting fixes that use the new `filepath-securejoin` - API onto old branches. This is not an ideal situation, but since using this - library is probably better for most downstreams than a hand-rolled - workaround, we now have compatibility shims that allow us to build on older - Go versions. -- Lower minimum version requirement for `golang.org/x/sys` to `v0.18.0` (we - need the wrappers for `fsconfig(2)`), which should also make backporting - patches to older branches easier. - -## [0.3.5] - 2024-12-06 ## - -### Fixed ### -- `MkdirAll` will now no longer return an `EEXIST` error if two racing - processes are creating the same directory. We will still verify that the path - is a directory, but this will avoid spurious errors when multiple threads or - programs are trying to `MkdirAll` the same path. opencontainers/runc#4543 - -## [0.3.4] - 2024-10-09 ## - -### Fixed ### -- Previously, some testing mocks we had resulted in us doing `import "testing"` - in non-`_test.go` code, which made some downstreams like Kubernetes unhappy. - This has been fixed. (#32) - -## [0.3.3] - 2024-09-30 ## - -### Fixed ### -- The mode and owner verification logic in `MkdirAll` has been removed. This - was originally intended to protect against some theoretical attacks but upon - further consideration these protections don't actually buy us anything and - they were causing spurious errors with more complicated filesystem setups. -- The "is the created directory empty" logic in `MkdirAll` has also been - removed. This was not causing us issues yet, but some pseudofilesystems (such - as `cgroup`) create non-empty directories and so this logic would've been - wrong for such cases. - -## [0.3.2] - 2024-09-13 ## - -### Changed ### -- Passing the `S_ISUID` or `S_ISGID` modes to `MkdirAllInRoot` will now return - an explicit error saying that those bits are ignored by `mkdirat(2)`. In the - past a different error was returned, but since the silent ignoring behaviour - is codified in the man pages a more explicit error seems apt. While silently - ignoring these bits would be the most compatible option, it could lead to - users thinking their code sets these bits when it doesn't. Programs that need - to deal with compatibility can mask the bits themselves. (#23, #25) - -### Fixed ### -- If a directory has `S_ISGID` set, then all child directories will have - `S_ISGID` set when created and a different gid will be used for any inode - created under the directory. Previously, the "expected owner and mode" - validation in `securejoin.MkdirAll` did not correctly handle this. We now - correctly handle this case. (#24, #25) - -## [0.3.1] - 2024-07-23 ## - -### Changed ### -- By allowing `Open(at)InRoot` to opt-out of the extra work done by `MkdirAll` - to do the necessary "partial lookups", `Open(at)InRoot` now does less work - for both implementations (resulting in a many-fold decrease in the number of - operations for `openat2`, and a modest improvement for non-`openat2`) and is - far more guaranteed to match the correct `openat2(RESOLVE_IN_ROOT)` - behaviour. -- We now use `readlinkat(fd, "")` where possible. For `Open(at)InRoot` this - effectively just means that we no longer risk getting spurious errors during - rename races. 
However, for our hardened procfs handler, this in theory should - prevent mount attacks from tricking us when doing magic-link readlinks (even - when using the unsafe host `/proc` handle). Unfortunately `Reopen` is still - potentially vulnerable to those kinds of somewhat-esoteric attacks. - - Technically this [will only work on post-2.6.39 kernels][linux-readlinkat-emptypath] - but it seems incredibly unlikely anyone is using `filepath-securejoin` on a - pre-2011 kernel. - -### Fixed ### -- Several improvements were made to the errors returned by `Open(at)InRoot` and - `MkdirAll` when dealing with invalid paths under the emulated (ie. - non-`openat2`) implementation. Previously, some paths would return the wrong - error (`ENOENT` when the last component was a non-directory), and other paths - would be returned as though they were acceptable (trailing-slash components - after a non-directory would be ignored by `Open(at)InRoot`). - - These changes were done to match `openat2`'s behaviour and purely is a - consistency fix (most users are going to be using `openat2` anyway). - -[linux-readlinkat-emptypath]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=65cfc6722361570bfe255698d9cd4dccaf47570d - -## [0.3.0] - 2024-07-11 ## - -### Added ### -- A new set of `*os.File`-based APIs have been added. These are adapted from - [libpathrs][] and we strongly suggest using them if possible (as they provide - far more protection against attacks than `SecureJoin`): - - - `Open(at)InRoot` resolves a path inside a rootfs and returns an `*os.File` - handle to the path. Note that the handle returned is an `O_PATH` handle, - which cannot be used for reading or writing (as well as some other - operations -- [see open(2) for more details][open.2]) - - - `Reopen` takes an `O_PATH` file handle and safely re-opens it to upgrade - it to a regular handle. This can also be used with non-`O_PATH` handles, - but `O_PATH` is the most obvious application. - - - `MkdirAll` is an implementation of `os.MkdirAll` that is safe to use to - create a directory tree within a rootfs. - - As these are new APIs, they may change in the future. However, they should be - safe to start migrating to as we have extensive tests ensuring they behave - correctly and are safe against various races and other attacks. - -[libpathrs]: https://github.com/openSUSE/libpathrs -[open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html - -## [0.2.5] - 2024-05-03 ## - -### Changed ### -- Some minor changes were made to how lexical components (like `..` and `.`) - are handled during path generation in `SecureJoin`. There is no behaviour - change as a result of this fix (the resulting paths are the same). - -### Fixed ### -- The error returned when we hit a symlink loop now references the correct - path. (#10) - -## [0.2.4] - 2023-09-06 ## - -### Security ### -- This release fixes a potential security issue in filepath-securejoin when - used on Windows ([GHSA-6xv5-86q9-7xr8][], which could be used to generate - paths outside of the provided rootfs in certain cases), as well as improving - the overall behaviour of filepath-securejoin when dealing with Windows paths - that contain volume names. Thanks to Paulo Gomes for discovering and fixing - these issues. - -### Fixed ### -- Switch to GitHub Actions for CI so we can test on Windows as well as Linux - and MacOS. 
- -[GHSA-6xv5-86q9-7xr8]: https://github.com/advisories/GHSA-6xv5-86q9-7xr8 - -## [0.2.3] - 2021-06-04 ## - -### Changed ### -- Switch to Go 1.13-style `%w` error wrapping, letting us drop the dependency - on `github.com/pkg/errors`. - -## [0.2.2] - 2018-09-05 ## - -### Changed ### -- Use `syscall.ELOOP` as the base error for symlink loops, rather than our own - (internal) error. This allows callers to more easily use `errors.Is` to check - for this case. - -## [0.2.1] - 2018-09-05 ## - -### Fixed ### -- Use our own `IsNotExist` implementation, which lets us handle `ENOTDIR` - properly within `SecureJoin`. - -## [0.2.0] - 2017-07-19 ## - -We now have 100% test coverage! - -### Added ### -- Add a `SecureJoinVFS` API that can be used for mocking (as we do in our new - tests) or for implementing custom handling of lookup operations (such as for - rootless containers, where work is necessary to access directories with weird - modes because we don't have `CAP_DAC_READ_SEARCH` or `CAP_DAC_OVERRIDE`). - -## 0.1.0 - 2017-07-19 - -This is our first release of `github.com/cyphar/filepath-securejoin`, -containing a full implementation with a coverage of 93.5% (the only missing -cases are the error cases, which are hard to mocktest at the moment). - -[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD -[0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 -[0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0 -[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6 -[0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5 -[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4 -[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.2...v0.3.3 -[0.3.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...v0.3.2 -[0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1 -[0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0 -[0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5 -[0.2.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.3...v0.2.4 -[0.2.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.2...v0.2.3 -[0.2.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.1...v0.2.2 -[0.2.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.0...v0.2.1 -[0.2.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.1.0...v0.2.0 diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/e2e/vendor/github.com/cyphar/filepath-securejoin/LICENSE deleted file mode 100644 index cb1ab88da0f..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -Copyright (C) 2017-2024 SUSE LLC. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/README.md b/e2e/vendor/github.com/cyphar/filepath-securejoin/README.md deleted file mode 100644 index eaeb53fcd0a..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/README.md +++ /dev/null @@ -1,169 +0,0 @@ -## `filepath-securejoin` ## - -[![Go Documentation](https://pkg.go.dev/badge/github.com/cyphar/filepath-securejoin.svg)](https://pkg.go.dev/github.com/cyphar/filepath-securejoin) -[![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) - -### Old API ### - -This library was originally just an implementation of `SecureJoin` which was -[intended to be included in the Go standard library][go#20126] as a safer -`filepath.Join` that would restrict the path lookup to be inside a root -directory. - -The implementation was based on code that existed in several container -runtimes. Unfortunately, this API is **fundamentally unsafe** against attackers -that can modify path components after `SecureJoin` returns and before the -caller uses the path, allowing for some fairly trivial TOCTOU attacks. - -`SecureJoin` (and `SecureJoinVFS`) are still provided by this library to -support legacy users, but new users are strongly suggested to avoid using -`SecureJoin` and instead use the [new api](#new-api) or switch to -[libpathrs][libpathrs]. - -With the above limitations in mind, this library guarantees the following: - -* If no error is set, the resulting string **must** be a child path of - `root` and will not contain any symlink path components (they will all be - expanded). - -* When expanding symlinks, all symlink path components **must** be resolved - relative to the provided root. In particular, this can be considered a - userspace implementation of how `chroot(2)` operates on file paths. Note that - these symlinks will **not** be expanded lexically (`filepath.Clean` is not - called on the input before processing). - -* Non-existent path components are unaffected by `SecureJoin` (similar to - `filepath.EvalSymlinks`'s semantics). - -* The returned path will always be `filepath.Clean`ed and thus not contain any - `..` components. 
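As a quick usage illustration of these guarantees (the root and input paths below are invented for the example; only `securejoin.SecureJoin` itself comes from this library):

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	root := "/srv/rootfs"            // hypothetical rootfs; must not be attacker-controlled
	unsafePath := "../../etc/passwd" // untrusted input

	safe, err := securejoin.SecureJoin(root, unsafePath)
	if err != nil {
		panic(err)
	}
	// The ".." components cannot escape root, so this prints
	// "/srv/rootfs/etc/passwd" (any symlink components encountered would
	// have been resolved relative to root, chroot-style).
	fmt.Println(safe)
}
```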
- -A (trivial) implementation of this function on GNU/Linux systems could be done -with the following (note that this requires root privileges and is far more -opaque than the implementation in this library, and also requires that -`readlink` is inside the `root` path and is trustworthy): - -```go -package securejoin - -import ( - "os/exec" - "path/filepath" -) - -func SecureJoin(root, unsafePath string) (string, error) { - unsafePath = string(filepath.Separator) + unsafePath - cmd := exec.Command("chroot", root, - "readlink", "--canonicalize-missing", "--no-newline", unsafePath) - output, err := cmd.CombinedOutput() - if err != nil { - return "", err - } - expanded := string(output) - return filepath.Join(root, expanded), nil -} -``` - -[libpathrs]: https://github.com/openSUSE/libpathrs -[go#20126]: https://github.com/golang/go/issues/20126 - -### New API ### - -While we recommend users switch to [libpathrs][libpathrs] as soon as it has a -stable release, some methods implemented by libpathrs have been ported to this -library to ease the transition. These APIs are only supported on Linux. - -These APIs are implemented such that `filepath-securejoin` will -opportunistically use certain newer kernel APIs that make these operations far -more secure. In particular: - -* All of the lookup operations will use [`openat2`][openat2.2] on new enough - kernels (Linux 5.6 or later) to restrict lookups through magic-links and - bind-mounts (for certain operations) and to make use of `RESOLVE_IN_ROOT` to - efficiently resolve symlinks within a rootfs. - -* The APIs provide hardening against a malicious `/proc` mount to either detect - or avoid being tricked by a `/proc` that is not legitimate. This is done - using [`openat2`][openat2.2] for all users, and privileged users will also be - further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2] - (Linux 5.2 or later). - -[openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html -[fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md -[open_tree.2]: https://github.com/brauner/man-pages-md/blob/main/open_tree.md - -#### `OpenInRoot` #### - -```go -func OpenInRoot(root, unsafePath string) (*os.File, error) -func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) -func Reopen(handle *os.File, flags int) (*os.File, error) -``` - -`OpenInRoot` is a much safer version of - -```go -path, err := securejoin.SecureJoin(root, unsafePath) -file, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) -``` - -that protects against various race attacks that could lead to serious security -issues, depending on the application. Note that the returned `*os.File` is an -`O_PATH` file descriptor, which is quite restricted. Callers will probably need -to use `Reopen` to get a more usable handle (this split is done to provide -useful features like PTY spawning and to avoid users accidentally opening bad -inodes that could cause a DoS). - -Callers need to be careful in how they use the returned `*os.File`. Usually it -is only safe to operate on the handle directly, and it is very easy to create a -security issue. [libpathrs][libpathrs] provides far more helpers to make using -these handles safer -- there is currently no plan to port them to -`filepath-securejoin`. - -`OpenatInRoot` is like `OpenInRoot` except that the root is provided using an -`*os.File`. This allows you to ensure that multiple `OpenatInRoot` (or -`MkdirAllHandle`) calls are operating on the same rootfs. 
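A hedged sketch of the flow described above (the paths are invented for the example; the calls and signatures are the ones documented in this section):

```go
package main

import (
	"fmt"
	"io"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

// readInRoot resolves unsafePath inside root and reads the file it names.
func readInRoot(root, unsafePath string) ([]byte, error) {
	// Step 1: resolve inside root. The result is a restricted O_PATH handle.
	handle, err := securejoin.OpenInRoot(root, unsafePath)
	if err != nil {
		return nil, err
	}
	defer handle.Close()

	// Step 2: upgrade the O_PATH handle into a readable descriptor.
	file, err := securejoin.Reopen(handle, unix.O_RDONLY|unix.O_CLOEXEC)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return io.ReadAll(file)
}

func main() {
	data, err := readInRoot("/srv/rootfs", "etc/hostname") // hypothetical paths
	fmt.Println(string(data), err)
}
```

The two-step split mirrors the rationale above: resolution hands back a handle that cannot accidentally read a bad inode, and the caller decides when, and with which flags, to upgrade it.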
- -> **NOTE**: Unlike `SecureJoin`, `OpenInRoot` will error out as soon as it hits -> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` -> which treated non-existent components as though they were real directories, -> and would allow for partial resolution of dangling symlinks. These behaviours -> are at odds with how Linux treats non-existent paths and dangling symlinks, -> and so these are no longer allowed. - -#### `MkdirAll` #### - -```go -func MkdirAll(root, unsafePath string, mode int) error -func MkdirAllHandle(root *os.File, unsafePath string, mode int) (*os.File, error) -``` - -`MkdirAll` is a much safer version of - -```go -path, err := securejoin.SecureJoin(root, unsafePath) -err = os.MkdirAll(path, mode) -``` - -that protects against the same kinds of races that `OpenInRoot` protects -against. - -`MkdirAllHandle` is like `MkdirAll` except that the root is provided using an -`*os.File` (the reason for this is the same as with `OpenatInRoot`) and an -`*os.File` of the final created directory is returned (this directory is -guaranteed to be effectively identical to the directory created by -`MkdirAllHandle`, which is not possible to ensure by just using `OpenatInRoot` -after `MkdirAll`). - -> **NOTE**: Unlike `SecureJoin`, `MkdirAll` will error out as soon as it hits -> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` -> which treated non-existent components as though they were real directories, -> and would allow for partial resolution of dangling symlinks. These behaviours -> are at odds with how Linux treats non-existent paths and dangling symlinks, -> and so these are no longer allowed. This means that `MkdirAll` will not -> create non-existent directories referenced by a dangling symlink. - -### License ### - -The license of this project is the same as Go, which is a BSD 3-clause license -available in the `LICENSE` file. diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/VERSION b/e2e/vendor/github.com/cyphar/filepath-securejoin/VERSION deleted file mode 100644 index 267577d47e4..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.4.1 diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/doc.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/doc.go deleted file mode 100644 index 1ec7d065ef4..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/doc.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package securejoin implements a set of helpers to make it easier to write Go -// code that is safe against symlink-related escape attacks. The primary idea -// is to let you resolve a path within a rootfs directory as if the rootfs was -// a chroot. -// -// securejoin has two APIs, a "legacy" API and a "modern" API. -// -// The legacy API is [SecureJoin] and [SecureJoinVFS]. These methods are -// **not** safe against race conditions where an attacker changes the -// filesystem after (or during) the [SecureJoin] operation. -// -// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived -// functions). These are safe against racing attackers and have several other -// protections that are not provided by the legacy API. 
There are many more -// operations that most programs expect to be able to do safely, but we do not -// provide explicit support for them because we want to encourage users to -// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a -// cross-language next-generation library that is entirely designed around -// operating on paths safely. -// -// securejoin has been used by several container runtimes (Docker, runc, -// Kubernetes, etc) for quite a few years as a de-facto standard for operating -// on container filesystem paths "safely". However, most users still use the -// legacy API which is unsafe against various attacks (there is a fairly long -// history of CVEs in dependent as a result). Users should switch to the modern -// API as soon as possible (or even better, switch to libpathrs). -// -// This project was initially intended to be included in the Go standard -// library, but [it was rejected](https://go.dev/issue/20126). There is now a -// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API -// that shares some of the goals of filepath-securejoin. However, that design -// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the -// usecase of container runtimes and most system tools. -package securejoin diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go deleted file mode 100644 index 42452bbf9b0..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build linux && go1.20 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "fmt" -) - -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except -// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) -// is only guaranteed to give you baseErr. -func wrapBaseError(baseErr, extraErr error) error { - return fmt.Errorf("%w: %w", extraErr, baseErr) -} diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go deleted file mode 100644 index e7adca3fd12..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build linux && !go1.20 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "fmt" -) - -type wrappedError struct { - inner error - isError error -} - -func (err wrappedError) Is(target error) bool { - return err.isError == target -} - -func (err wrappedError) Unwrap() error { - return err.inner -} - -func (err wrappedError) Error() string { - return fmt.Sprintf("%v: %v", err.isError, err.inner) -} - -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except -// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) -// is only guaranteed to give you baseErr. 
-func wrapBaseError(baseErr, extraErr error) error { - return wrappedError{ - inner: baseErr, - isError: extraErr, - } -} diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go deleted file mode 100644 index ddd6fa9a41c..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build linux && go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "slices" - "sync" -) - -func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { - return slices.DeleteFunc(slice, delFn) -} - -func slices_Contains[S ~[]E, E comparable](slice S, val E) bool { - return slices.Contains(slice, val) -} - -func slices_Clone[S ~[]E, E any](slice S) S { - return slices.Clone(slice) -} - -func sync_OnceValue[T any](f func() T) func() T { - return sync.OnceValue(f) -} - -func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - return sync.OnceValues(f) -} diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go deleted file mode 100644 index f1e6fe7e717..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go +++ /dev/null @@ -1,124 +0,0 @@ -//go:build linux && !go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "sync" -) - -// These are very minimal implementations of functions that appear in Go 1.21's -// stdlib, included so that we can build on older Go versions. Most are -// borrowed directly from the stdlib, and a few are modified to be "obviously -// correct" without needing to copy too many other helpers. - -// clearSlice is equivalent to the builtin clear from Go 1.21. -// Copied from the Go 1.24 stdlib implementation. -func clearSlice[S ~[]E, E any](slice S) { - var zero E - for i := range slice { - slice[i] = zero - } -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := slices_IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Similar to the stdlib slices.Contains, except that we don't have -// slices.Index so we need to use slices.IndexFunc for this non-Func helper. -func slices_Contains[S ~[]E, E comparable](s S, v E) bool { - return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0 -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Copied from the Go 1.24 stdlib implementation. 
-func sync_OnceValue[T any](f func() T) func() T { - var ( - once sync.Once - valid bool - p any - result T - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - result = f() - f = nil - valid = true - } - return func() T { - once.Do(g) - if !valid { - panic(p) - } - return result - } -} - -// Copied from the Go 1.24 stdlib implementation. -func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - var ( - once sync.Once - valid bool - p any - r1 T1 - r2 T2 - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - r1, r2 = f() - f = nil - valid = true - } - return func() (T1, T2) { - once.Do(g) - if !valid { - panic(p) - } - return r1, r2 - } -} diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/join.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/join.go deleted file mode 100644 index e6634d4778f..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/join.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -// Copyright (C) 2017-2025 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "os" - "path/filepath" - "strings" - "syscall" -) - -const maxSymlinkLimit = 255 - -// IsNotExist tells you if err is an error that implies that either the path -// accessed does not exist (or path components don't exist). This is -// effectively a more broad version of [os.IsNotExist]. -func IsNotExist(err error) bool { - // Check that it's not actually an ENOTDIR, which in some cases is a more - // convoluted case of ENOENT (usually involving weird paths). - return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) -} - -// errUnsafeRoot is returned if the user provides SecureJoinVFS with a path -// that contains ".." components. -var errUnsafeRoot = errors.New("root path provided to SecureJoin contains '..' components") - -// stripVolume just gets rid of the Windows volume included in a path. Based on -// some godbolt tests, the Go compiler is smart enough to make this a no-op on -// Linux. -func stripVolume(path string) string { - return path[len(filepath.VolumeName(path)):] -} - -// hasDotDot checks if the path contains ".." components in a platform-agnostic -// way. -func hasDotDot(path string) bool { - // If we are on Windows, strip any volume letters. It turns out that - // C:..\foo may (or may not) be a valid pathname and we need to handle that - // leading "..". - path = stripVolume(path) - // Look for "/../" in the path, but we need to handle leading and trailing - // ".."s by adding separators. Doing this with filepath.Separator is ugly - // so just convert to Unix-style "/" first. - path = filepath.ToSlash(path) - return strings.Contains("/"+path+"/", "/../") -} - -// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except -// that the returned path is guaranteed to be scoped inside the provided root -// path (when evaluated). Any symbolic links in the path are evaluated with the -// given root treated as the root of the filesystem, similar to a chroot. The -// filesystem state is evaluated through the given [VFS] interface (if nil, the -// standard [os].* family of functions are used). 
-// -// Note that the guarantees provided by this function only apply if the path -// components in the returned string are not modified (in other words are not -// replaced with symlinks on the filesystem) after this function has returned. -// Such a symlink race is necessarily out-of-scope of SecureJoinVFS. -// -// NOTE: Due to the above limitation, Linux users are strongly encouraged to -// use [OpenInRoot] instead, which does safely protect against these kinds of -// attacks. There is no way to solve this problem with SecureJoinVFS because -// the API is fundamentally wrong (you cannot return a "safe" path string and -// guarantee it won't be modified afterwards). -// -// Volume names in unsafePath are always discarded, regardless if they are -// provided via direct input or when evaluating symlinks. Therefore: -// -// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt" -// -// If the provided root is not [filepath.Clean] then an error will be returned, -// as such root paths are bordering on somewhat unsafe and using such paths is -// not best practice. We also strongly suggest that any root path is first -// fully resolved using [filepath.EvalSymlinks] or otherwise constructed to -// avoid containing symlink components. Of course, the root also *must not* be -// attacker-controlled. -func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { - // The root path must not contain ".." components, otherwise when we join - // the subpath we will end up with a weird path. We could work around this - // in other ways but users shouldn't be giving us non-lexical root paths in - // the first place. - if hasDotDot(root) { - return "", errUnsafeRoot - } - - // Use the os.* VFS implementation if none was specified. - if vfs == nil { - vfs = osVFS{} - } - - unsafePath = filepath.FromSlash(unsafePath) - var ( - currentPath string - remainingPath = unsafePath - linksWalked int - ) - for remainingPath != "" { - // On Windows, if we managed to end up at a path referencing a volume, - // drop the volume to make sure we don't end up with broken paths or - // escaping the root volume. - remainingPath = stripVolume(remainingPath) - - // Get the next path component. - var part string - if i := strings.IndexRune(remainingPath, filepath.Separator); i == -1 { - part, remainingPath = remainingPath, "" - } else { - part, remainingPath = remainingPath[:i], remainingPath[i+1:] - } - - // Apply the component lexically to the path we are building. - // currentPath does not contain any symlinks, and we are lexically - // dealing with a single component, so it's okay to do a filepath.Clean - // here. - nextPath := filepath.Join(string(filepath.Separator), currentPath, part) - if nextPath == string(filepath.Separator) { - currentPath = "" - continue - } - fullPath := root + string(filepath.Separator) + nextPath - - // Figure out whether the path is a symlink. - fi, err := vfs.Lstat(fullPath) - if err != nil && !IsNotExist(err) { - return "", err - } - // Treat non-existent path components the same as non-symlinks (we - // can't do any better here). - if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { - currentPath = nextPath - continue - } - - // It's a symlink, so get its contents and expand it by prepending it - // to the yet-unparsed path. 
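-		// For example, while resolving "a/b/c" where "b" is a symlink to
-		// "../x", the target is prepended so remainingPath becomes "../x/c"
-		// and the walk simply continues; an absolute target such as "/etc"
-		// instead resets currentPath below, restarting the walk from the
-		// root.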
- linksWalked++ - if linksWalked > maxSymlinkLimit { - return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} - } - - dest, err := vfs.Readlink(fullPath) - if err != nil { - return "", err - } - remainingPath = dest + string(filepath.Separator) + remainingPath - // Absolute symlinks reset any work we've already done. - if filepath.IsAbs(dest) { - currentPath = "" - } - } - - // There should be no lexical components like ".." left in the path here, - // but for safety clean up the path before joining it to the root. - finalPath := filepath.Join(string(filepath.Separator), currentPath) - return filepath.Join(root, finalPath), nil -} - -// SecureJoin is a wrapper around [SecureJoinVFS] that just uses the [os].* library -// of functions as the [VFS]. If in doubt, use this function over [SecureJoinVFS]. -func SecureJoin(root, unsafePath string) (string, error) { - return SecureJoinVFS(root, unsafePath, nil) -} diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go deleted file mode 100644 index be81e498d72..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go +++ /dev/null @@ -1,388 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "path" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -type symlinkStackEntry struct { - // (dir, remainingPath) is what we would've returned if the link didn't - // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in - // this case. - dir *os.File - remainingPath string - // linkUnwalked is the remaining path components from the original - // Readlink which we have yet to walk. When this slice is empty, we - // drop the link from the stack. - linkUnwalked []string -} - -func (se symlinkStackEntry) String() string { - return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/")) -} - -func (se symlinkStackEntry) Close() { - _ = se.dir.Close() -} - -type symlinkStack []*symlinkStackEntry - -func (s *symlinkStack) IsEmpty() bool { - return s == nil || len(*s) == 0 -} - -func (s *symlinkStack) Close() { - if s != nil { - for _, link := range *s { - link.Close() - } - // TODO: Switch to clear once we switch to Go 1.21. - *s = nil - } -} - -var ( - errEmptyStack = errors.New("[internal] stack is empty") - errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack") -) - -func (s *symlinkStack) popPart(part string) error { - if s == nil || s.IsEmpty() { - // If there is nothing in the symlink stack, then the part was from the - // real path provided by the user, and this is a no-op. - return errEmptyStack - } - if part == "." { - // "." components are no-ops -- we drop them when doing SwapLink. - return nil - } - - tailEntry := (*s)[len(*s)-1] - - // Double-check that we are popping the component we expect. 
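-	// The popped part must match the head of the most recent link's unwalked
-	// components; anything else means the book-keeping has diverged from the
-	// actual walk and is treated as an internal error.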
- if len(tailEntry.linkUnwalked) == 0 { - return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry) - } - headPart := tailEntry.linkUnwalked[0] - if headPart != part { - return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart) - } - - // Drop the component, but keep the entry around in case we are dealing - // with a "tail-chained" symlink. - tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:] - return nil -} - -func (s *symlinkStack) PopPart(part string) error { - if err := s.popPart(part); err != nil { - if errors.Is(err, errEmptyStack) { - // Skip empty stacks. - err = nil - } - return err - } - - // Clean up any of the trailing stack entries that are empty. - for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- { - entry := (*s)[lastGood] - if len(entry.linkUnwalked) > 0 { - break - } - entry.Close() - (*s) = (*s)[:lastGood] - } - return nil -} - -func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error { - if s == nil { - return nil - } - // Split the link target and clean up any "" parts. - linkTargetParts := slices_DeleteFunc( - strings.Split(linkTarget, "/"), - func(part string) bool { return part == "" || part == "." }) - - // Copy the directory so the caller doesn't close our copy. - dirCopy, err := dupFile(dir) - if err != nil { - return err - } - - // Add to the stack. - *s = append(*s, &symlinkStackEntry{ - dir: dirCopy, - remainingPath: remainingPath, - linkUnwalked: linkTargetParts, - }) - return nil -} - -func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error { - // If we are currently inside a symlink resolution, remove the symlink - // component from the last symlink entry, but don't remove the entry even - // if it's empty. If we are a "tail-chained" symlink (a trailing symlink we - // hit during a symlink resolution) we need to keep the old symlink until - // we finish the resolution. - if err := s.popPart(linkPart); err != nil { - if !errors.Is(err, errEmptyStack) { - return err - } - // Push the component regardless of whether the stack was empty. - } - return s.push(dir, remainingPath, linkTarget) -} - -func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) { - if s == nil || s.IsEmpty() { - return nil, "", false - } - tailEntry := (*s)[0] - *s = (*s)[1:] - return tailEntry.dir, tailEntry.remainingPath, true -} - -// partialLookupInRoot tries to lookup as much of the request path as possible -// within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing -// component of the requested path, returning a file handle to the final -// existing component and a string containing the remaining path components. -func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) { - return lookupInRoot(root, unsafePath, true) -} - -func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) { - handle, remainingPath, err := lookupInRoot(root, unsafePath, false) - if remainingPath != "" && err == nil { - // should never happen - err = fmt.Errorf("[bug] non-empty remaining path when doing a non-partial lookup: %q", remainingPath) - } - // lookupInRoot(partial=false) will always close the handle if an error is - // returned, so no need to double-check here. 
-	return handle, err
-}
-
-func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
-	unsafePath = filepath.ToSlash(unsafePath) // noop
-
-	// This is very similar to SecureJoin, except that we operate on the
-	// components using file descriptors. We then return the last component we
-	// managed to open, along with the remaining path components not opened.
-
-	// Try to use openat2 if possible.
-	if hasOpenat2() {
-		return lookupOpenat2(root, unsafePath, partial)
-	}
-
-	// Get the "actual" root path from /proc/self/fd. This is necessary if the
-	// root is some magic-link like /proc/$pid/root, in which case we want to
-	// make sure when we do checkProcSelfFdPath that we are using the correct
-	// root path.
-	logicalRootPath, err := procSelfFdReadlink(root)
-	if err != nil {
-		return nil, "", fmt.Errorf("get real root path: %w", err)
-	}
-
-	currentDir, err := dupFile(root)
-	if err != nil {
-		return nil, "", fmt.Errorf("clone root fd: %w", err)
-	}
-	defer func() {
-		// If a handle is not returned, close the internal handle.
-		if Handle == nil {
-			_ = currentDir.Close()
-		}
-	}()
-
-	// symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats
-	// dangling symlinks. If we hit a non-existent path while resolving a
-	// symlink, we need to return the (dir, remainingPath) that we had when we
-	// hit the symlink (treating the symlink as though it were a regular file).
-	// The set of (dir, remainingPath) pairs is stored within the symlinkStack
-	// and we add and remove parts when we hit symlink and non-symlink
-	// components respectively. We need a stack because of recursive symlinks
-	// (symlinks that contain symlink components in their target).
-	//
-	// Note that the stack is ONLY used for book-keeping. All of the actual
-	// path walking logic is still based on currentPath/remainingPath and
-	// currentDir (as in SecureJoin).
-	var symStack *symlinkStack
-	if partial {
-		symStack = new(symlinkStack)
-		defer symStack.Close()
-	}
-
-	var (
-		linksWalked   int
-		currentPath   string
-		remainingPath = unsafePath
-	)
-	for remainingPath != "" {
-		// Save the current remaining path so if the part is not real we can
-		// return the path including the component.
-		oldRemainingPath := remainingPath
-
-		// Get the next path component.
-		var part string
-		if i := strings.IndexByte(remainingPath, '/'); i == -1 {
-			part, remainingPath = remainingPath, ""
-		} else {
-			part, remainingPath = remainingPath[:i], remainingPath[i+1:]
-		}
-		// If we hit an empty component, we need to treat it as though it is
-		// "." so that trailing "/" and "//" components on a non-directory
-		// correctly return the right error code.
-		if part == "" {
-			part = "."
-		}
-
-		// Apply the component lexically to the path we are building.
-		// currentPath does not contain any symlinks, and we are lexically
-		// dealing with a single component, so it's okay to do a filepath.Clean
-		// here.
-		nextPath := path.Join("/", currentPath, part)
-		// If we logically hit the root, just clone the root rather than
-		// opening the part and doing all of the other checks.
-		if nextPath == "/" {
-			if err := symStack.PopPart(part); err != nil {
-				return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err)
-			}
-			// Jump to root.
-			rootClone, err := dupFile(root)
-			if err != nil {
-				return nil, "", fmt.Errorf("clone root fd: %w", err)
-			}
-			_ = currentDir.Close()
-			currentDir = rootClone
-			currentPath = nextPath
-			continue
-		}
-
-		// Try to open the next component.
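-		// O_PATH gives us a handle without opening the file's contents, and
-		// O_NOFOLLOW means that if the component is a symlink we open the
-		// link itself, so we can detect it below and emulate the resolution
-		// ourselves.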
-		nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
-		switch {
-		case err == nil:
-			st, err := nextDir.Stat()
-			if err != nil {
-				_ = nextDir.Close()
-				return nil, "", fmt.Errorf("stat component %q: %w", part, err)
-			}
-
-			switch st.Mode() & os.ModeType {
-			case os.ModeSymlink:
-				// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See
-				// Linux commit 65cfc6722361 ("readlinkat(), fchownat() and
-				// fstatat() with empty relative pathnames").
-				linkDest, err := readlinkatFile(nextDir, "")
-				// We don't need the handle anymore.
-				_ = nextDir.Close()
-				if err != nil {
-					return nil, "", err
-				}
-
-				linksWalked++
-				if linksWalked > maxSymlinkLimit {
-					return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP}
-				}
-
-				// Swap out the symlink's component for the link entry itself.
-				if err := symStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil {
-					return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err)
-				}
-
-				// Update our logical remaining path.
-				remainingPath = linkDest + "/" + remainingPath
-				// Absolute symlinks reset any work we've already done.
-				if path.IsAbs(linkDest) {
-					// Jump to root.
-					rootClone, err := dupFile(root)
-					if err != nil {
-						return nil, "", fmt.Errorf("clone root fd: %w", err)
-					}
-					_ = currentDir.Close()
-					currentDir = rootClone
-					currentPath = "/"
-				}
-
-			default:
-				// If we are dealing with a directory, simply walk into it.
-				_ = currentDir.Close()
-				currentDir = nextDir
-				currentPath = nextPath
-
-				// The part was real, so drop it from the symlink stack.
-				if err := symStack.PopPart(part); err != nil {
-					return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err)
-				}
-
-				// If we are operating on a .., make sure we haven't escaped.
-				// We only have to check for ".." here because walking down
-				// into a regular component cannot cause you to escape. This
-				// mirrors the logic in RESOLVE_IN_ROOT, except we have to
-				// check every ".." rather than only checking after a rename
-				// or mount on the system.
-				if part == ".." {
-					// Make sure the root hasn't moved.
-					if err := checkProcSelfFdPath(logicalRootPath, root); err != nil {
-						return nil, "", fmt.Errorf("root path moved during lookup: %w", err)
-					}
-					// Make sure the path is what we expect.
-					fullPath := logicalRootPath + nextPath
-					if err := checkProcSelfFdPath(fullPath, currentDir); err != nil {
-						return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err)
-					}
-				}
-			}
-
-		default:
-			if !partial {
-				return nil, "", err
-			}
-			// If there are any remaining components in the symlink stack, we
-			// are still within a symlink resolution and thus we hit a dangling
-			// symlink. So pretend that the first symlink in the stack we hit
-			// was an ENOENT (to match openat2).
-			if oldDir, remainingPath, ok := symStack.PopTopSymlink(); ok {
-				_ = currentDir.Close()
-				return oldDir, remainingPath, err
-			}
-			// We have hit a final component that doesn't exist, so we have our
-			// partial open result. Note that we have to use the OLD remaining
-			// path, since the lookup failed.
-			return currentDir, oldRemainingPath, err
-		}
-	}
-
-	// If the unsafePath had a trailing slash, we need to make sure we try to
-	// do a relative "." open so that we will correctly return an error when
-	// the final component is a non-directory (to match openat2). In the
-	// context of openat2, a trailing slash and a trailing "/." are completely
-	// equivalent.
-	if strings.HasSuffix(unsafePath, "/") {
-		nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
-		if err != nil {
-			if !partial {
-				_ = currentDir.Close()
-				currentDir = nil
-			}
-			return currentDir, "", err
-		}
-		_ = currentDir.Close()
-		currentDir = nextDir
-	}
-
-	// All of the components existed!
-	return currentDir, "", nil
-}
diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
deleted file mode 100644
index a17ae3b0387..00000000000
--- a/e2e/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
+++ /dev/null
@@ -1,236 +0,0 @@
-//go:build linux
-
-// Copyright (C) 2024 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package securejoin
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"golang.org/x/sys/unix"
-)
-
-var (
-	errInvalidMode    = errors.New("invalid permission mode")
-	errPossibleAttack = errors.New("possible attack detected")
-)
-
-// modePermExt is like os.ModePerm except that it also includes the set[ug]id
-// and sticky bits.
-const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
-
-//nolint:cyclop // this function needs to handle a lot of cases
-func toUnixMode(mode os.FileMode) (uint32, error) {
-	sysMode := uint32(mode.Perm())
-	if mode&os.ModeSetuid != 0 {
-		sysMode |= unix.S_ISUID
-	}
-	if mode&os.ModeSetgid != 0 {
-		sysMode |= unix.S_ISGID
-	}
-	if mode&os.ModeSticky != 0 {
-		sysMode |= unix.S_ISVTX
-	}
-	// We don't allow file type bits.
-	if mode&os.ModeType != 0 {
-		return 0, fmt.Errorf("%w %+.3o (%s): type bits not permitted", errInvalidMode, mode, mode)
-	}
-	// We don't allow other unknown modes.
-	if mode&^modePermExt != 0 || sysMode&unix.S_IFMT != 0 {
-		return 0, fmt.Errorf("%w %+.3o (%s): unknown mode bits", errInvalidMode, mode, mode)
-	}
-	return sysMode, nil
-}
-
-// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use
-// in two respects:
-//
-//   - The caller provides the root directory as an *[os.File] (preferably O_PATH)
-//     handle. This means that the caller can be sure which root directory is
-//     being used. Note that this can be emulated by using /proc/self/fd/... as
-//     the root path with [os.MkdirAll].
-//
-//   - Once all of the directories have been created, an *[os.File] O_PATH handle
-//     to the directory at unsafePath is returned to the caller. This is done in
-//     an effectively-race-free way (an attacker would only be able to swap the
-//     final directory component), which is not possible to emulate with
-//     [MkdirAll].
-//
-// In addition, the returned handle is obtained far more efficiently than doing
-// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after
-// doing [MkdirAll]. If you intend to open the directory after creating it, you
-// should use MkdirAllHandle.
-func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) {
-	unixMode, err := toUnixMode(mode)
-	if err != nil {
-		return nil, err
-	}
-	// On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid
-	// bits. We could also silently ignore them but since we have very few
-	// users it seems more prudent to return an error so users notice that
-	// these bits will not be set.
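-	// For example, a mode of 0o755|os.ModeSetgid is rejected here rather
-	// than silently losing the setgid bit.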
- if unixMode&^0o1777 != 0 { - return nil, fmt.Errorf("%w for mkdir %+.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) - } - - // Try to open as much of the path as possible. - currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) - defer func() { - if Err != nil { - _ = currentDir.Close() - } - }() - if err != nil && !errors.Is(err, unix.ENOENT) { - return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err) - } - - // If there is an attacker deleting directories as we walk into them, - // detect this proactively. Note this is guaranteed to detect if the - // attacker deleted any part of the tree up to currentDir. - // - // Once we walk into a dead directory, partialLookupInRoot would not be - // able to walk further down the tree (directories must be empty before - // they are deleted), and if the attacker has removed the entire tree we - // can be sure that anything that was originally inside a dead directory - // must also be deleted and thus is a dead directory in its own right. - // - // This is mostly a quality-of-life check, because mkdir will simply fail - // later if the attacker deletes the tree after this check. - if err := isDeadInode(currentDir); err != nil { - return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) - } - - // Re-open the path to match the O_DIRECTORY reopen loop later (so that we - // always return a non-O_PATH handle). We also check that we actually got a - // directory. - if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) { - return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) - } else if err != nil { - return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) - } else { - _ = currentDir.Close() - currentDir = reopenDir - } - - remainingParts := strings.Split(remainingPath, string(filepath.Separator)) - if slices_Contains(remainingParts, "..") { - // The path contained ".." components after the end of the "real" - // components. We could try to safely resolve ".." here but that would - // add a bunch of extra logic for something that it's not clear even - // needs to be supported. So just return an error. - // - // If we do filepath.Clean(remainingPath) then we end up with the - // problem that ".." can erase a trailing dangling symlink and produce - // a path that doesn't quite match what the user asked for. - return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath) - } - - // Create the remaining components. - for _, part := range remainingParts { - switch part { - case "", ".": - // Skip over no-op paths. - continue - } - - // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely - // create the final component without worrying about symlink-exchange - // attacks. - // - // If we get -EEXIST, it's possible that another program created the - // directory at the same time as us. In that case, just continue on as - // if we created it (if the created inode is not a directory, the - // following open call will fail). - if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) { - err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} - // Make the error a bit nicer if the directory is dead. 
-			if deadErr := isDeadInode(currentDir); deadErr != nil {
-				// TODO: Once we bump the minimum Go version to 1.20, we can use
-				// multiple %w verbs for this wrapping. For now we need to use a
-				// compatibility shim for older Go versions.
-				//err = fmt.Errorf("%w (%w)", err, deadErr)
-				err = wrapBaseError(err, deadErr)
-			}
-			return nil, err
-		}
-
-		// Get a handle to the next component. O_DIRECTORY means we don't need
-		// to use O_PATH.
-		var nextDir *os.File
-		if hasOpenat2() {
-			nextDir, err = openat2File(currentDir, part, &unix.OpenHow{
-				Flags:   unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC,
-				Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV,
-			})
-		} else {
-			nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
-		}
-		if err != nil {
-			return nil, err
-		}
-		_ = currentDir.Close()
-		currentDir = nextDir
-
-		// It's possible that the directory we just opened was swapped by an
-		// attacker. Unfortunately there isn't much we can do to protect
-		// against this, and MkdirAll's behaviour is that we will reuse
-		// existing directories anyway so the need to protect against this is
-		// incredibly limited (and arguably doesn't even deserve mention here).
-		//
-		// Ideally we might want to check that the owner and mode match what we
-		// would've created -- unfortunately, it is non-trivial to verify that
-		// the owner and mode of the created directory match. While plain Unix
-		// DAC rules seem simple enough to emulate, there are a bunch of other
-		// factors that can change the mode or owner of created directories
-		// (default POSIX ACLs, mount options like uid=1,gid=2,umask=0 on
-		// filesystems like vfat, etc.). We used to try to verify this but it
-		// just led to a series of spurious errors.
-		//
-		// We could also check that the directory is non-empty, but
-		// unfortunately some pseudofilesystems (like cgroupfs) create
-		// non-empty directories, which would result in different spurious
-		// errors.
-	}
-	return currentDir, nil
-}
-
-// MkdirAll is a race-safe alternative to the [os.MkdirAll] function,
-// where the new directory is guaranteed to be within the root directory (if an
-// attacker can move directories from inside the root to outside the root, the
-// created directory tree might be outside of the root but the key constraint
-// is that at no point will we walk outside of the directory tree we are
-// creating).
-//
-// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to
-//
-//	path, _ := securejoin.SecureJoin(root, unsafePath)
-//	err := os.MkdirAll(path, mode)
-//
-// But is much safer. The above implementation is unsafe because if an attacker
-// can modify the filesystem tree between [SecureJoin] and [os.MkdirAll], it is
-// possible for MkdirAll to resolve unsafe symlink components and create
-// directories outside of the root.
-//
-// If you plan to open the directory after you have created it or want to use
-// an open directory handle as the root, you should use [MkdirAllHandle] instead.
-// This function is a wrapper around [MkdirAllHandle].
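-//
-// A minimal usage sketch (the root path below is a hypothetical example):
-//
-//	// Create rootfs/a/b/c, resolving any symlinks in "a/b/c" strictly
-//	// inside the root.
-//	if err := securejoin.MkdirAll("/srv/rootfs", "a/b/c", 0o755); err != nil {
-//		// handle the error
-//	}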
-func MkdirAll(root, unsafePath string, mode os.FileMode) error {
-	rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
-	if err != nil {
-		return err
-	}
-	defer rootDir.Close()
-
-	f, err := MkdirAllHandle(rootDir, unsafePath, mode)
-	if err != nil {
-		return err
-	}
-	_ = f.Close()
-	return nil
-}
diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
deleted file mode 100644
index 230be73f0eb..00000000000
--- a/e2e/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
+++ /dev/null
@@ -1,103 +0,0 @@
-//go:build linux
-
-// Copyright (C) 2024 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package securejoin
-
-import (
-	"fmt"
-	"os"
-	"strconv"
-
-	"golang.org/x/sys/unix"
-)
-
-// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided
-// using an *[os.File] handle, to ensure that the correct root directory is used.
-func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) {
-	handle, err := completeLookupInRoot(root, unsafePath)
-	if err != nil {
-		return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: err}
-	}
-	return handle, nil
-}
-
-// OpenInRoot safely opens the provided unsafePath within the root.
-// Effectively, OpenInRoot(root, unsafePath) is equivalent to
-//
-//	path, _ := securejoin.SecureJoin(root, unsafePath)
-//	handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
-//
-// But is much safer. The above implementation is unsafe because if an attacker
-// can modify the filesystem tree between [SecureJoin] and [os.OpenFile], it is
-// possible for the returned file to be outside of the root.
-//
-// Note that the returned handle is an O_PATH handle, meaning that only a very
-// limited set of operations will work on the handle. This is done to avoid
-// accidentally opening an untrusted file that could cause issues (such as a
-// disconnected TTY that could cause a DoS, or some other issue). In order to
-// use the returned handle, you can "upgrade" it to a proper handle using
-// [Reopen].
-func OpenInRoot(root, unsafePath string) (*os.File, error) {
-	rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
-	if err != nil {
-		return nil, err
-	}
-	defer rootDir.Close()
-	return OpenatInRoot(rootDir, unsafePath)
-}
-
-// Reopen takes an *[os.File] handle and re-opens it through /proc/self/fd.
-// Reopen(file, flags) is effectively equivalent to
-//
-//	fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd())
-//	os.OpenFile(fdPath, flags|unix.O_CLOEXEC, 0)
-//
-// But with some extra hardenings to ensure that we are not tricked by a
-// maliciously-configured /proc mount. While this attack scenario is not
-// common, in container runtimes it is possible for higher-level runtimes to be
-// tricked into configuring an unsafe /proc that can be used to attack file
-// operations. See [CVE-2019-19921] for more details.
-//
-// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw
-func Reopen(handle *os.File, flags int) (*os.File, error) {
-	procRoot, err := getProcRoot()
-	if err != nil {
-		return nil, err
-	}
-
-	// We can't operate on /proc/thread-self/fd/$n directly when doing a
-	// re-open, so we need to open /proc/thread-self/fd and then open a single
-	// final component.
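-	// In other words, for a handle with fd 7 we take a handle to "fd/" and
-	// then openat(2) the single component "7" from it, rather than resolving
-	// the full "fd/7" magic-link path in one step.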
- procFdDir, closer, err := procThreadSelf(procRoot, "fd/") - if err != nil { - return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) - } - defer procFdDir.Close() - defer closer() - - // Try to detect if there is a mount on top of the magic-link we are about - // to open. If we are using unsafeHostProcRoot(), this could change after - // we check it (and there's nothing we can do about that) but for - // privateProcRoot() this should be guaranteed to be safe (at least since - // Linux 5.12[1], when anonymous mount namespaces were completely isolated - // from external mounts including mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). - fdStr := strconv.Itoa(int(handle.Fd())) - if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil { - return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) - } - - flags |= unix.O_CLOEXEC - // Rather than just wrapping openatFile, open-code it so we can copy - // handle.Name(). - reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) - if err != nil { - return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) - } - return os.NewFile(uintptr(reopenFd), handle.Name()), nil -} diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go deleted file mode 100644 index f7a13e69ce8..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -var hasOpenat2 = sync_OnceValue(func() bool { - fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, - }) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) - -func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { - // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve - // ".." while a mount or rename occurs anywhere on the system. This could - // happen spuriously, or as the result of an attacker trying to mess with - // us during lookup. - // - // In addition, scoped lookups have a "safety check" at the end of - // complete_walk which will return -EXDEV if the final path is not in the - // root. - return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && - (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) -} - -const scopedLookupMaxRetries = 10 - -func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { - fullPath := dir.Name() + "/" + path - // Make sure we always set O_CLOEXEC. - how.Flags |= unix.O_CLOEXEC - var tries int - for tries < scopedLookupMaxRetries { - fd, err := unix.Openat2(int(dir.Fd()), path, how) - if err != nil { - if scopedLookupShouldRetry(how, err) { - // We retry a couple of times to avoid the spurious errors, and - // if we are being attacked then returning -EAGAIN is the best - // we can do. - tries++ - continue - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} - } - // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. 
-		// NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise
-		// you'll get infinite recursion here.
-		if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT {
-			if actualPath, err := rawProcSelfFdReadlink(fd); err == nil {
-				fullPath = actualPath
-			}
-		}
-		return os.NewFile(uintptr(fd), fullPath), nil
-	}
-	return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack}
-}
-
-func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) {
-	if !partial {
-		file, err := openat2File(root, unsafePath, &unix.OpenHow{
-			Flags:   unix.O_PATH | unix.O_CLOEXEC,
-			Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
-		})
-		return file, "", err
-	}
-	return partialLookupOpenat2(root, unsafePath)
-}
-
-// partialLookupOpenat2 is an alternative implementation of
-// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a
-// handle to the deepest existing child of the requested path within the root.
-func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) {
-	// TODO: Implement this as a git-bisect-like binary search.
-
-	unsafePath = filepath.ToSlash(unsafePath) // noop
-	endIdx := len(unsafePath)
-	var lastError error
-	for endIdx > 0 {
-		subpath := unsafePath[:endIdx]
-
-		handle, err := openat2File(root, subpath, &unix.OpenHow{
-			Flags:   unix.O_PATH | unix.O_CLOEXEC,
-			Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
-		})
-		if err == nil {
-			// Jump over the slash if we have a non-"" remainingPath.
-			if endIdx < len(unsafePath) {
-				endIdx += 1
-			}
-			// We found a subpath!
-			return handle, unsafePath[endIdx:], lastError
-		}
-		if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) {
-			// That path doesn't exist, let's try the next directory up.
-			endIdx = strings.LastIndexByte(subpath, '/')
-			lastError = err
-			continue
-		}
-		return nil, "", fmt.Errorf("open subpath: %w", err)
-	}
-	// If we couldn't open anything, the whole subpath is missing. Return a
-	// copy of the root fd so that the caller doesn't close this one by
-	// accident.
-	rootClone, err := dupFile(root)
-	if err != nil {
-		return nil, "", err
-	}
-	return rootClone, unsafePath, lastError
-}
diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go
deleted file mode 100644
index 949fb5f2d82..00000000000
--- a/e2e/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go
+++ /dev/null
@@ -1,59 +0,0 @@
-//go:build linux
-
-// Copyright (C) 2024 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package securejoin
-
-import (
-	"os"
-	"path/filepath"
-
-	"golang.org/x/sys/unix"
-)
-
-func dupFile(f *os.File) (*os.File, error) {
-	fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0)
-	if err != nil {
-		return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err)
-	}
-	return os.NewFile(uintptr(fd), f.Name()), nil
-}
-
-func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) {
-	// Make sure we always set O_CLOEXEC.
-	flags |= unix.O_CLOEXEC
-	fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode))
-	if err != nil {
-		return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err}
-	}
-	// All of the paths we use with openatFile are guaranteed to be
-	// lexically safe, so we can use filepath.Join here.
-	fullPath := filepath.Join(dir.Name(), path)
-	return os.NewFile(uintptr(fd), fullPath), nil
-}
-
-func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) {
-	var stat unix.Stat_t
-	if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil {
-		return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err}
-	}
-	return stat, nil
-}
-
-func readlinkatFile(dir *os.File, path string) (string, error) {
-	size := 4096
-	for {
-		linkBuf := make([]byte, size)
-		n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf)
-		if err != nil {
-			return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err}
-		}
-		if n != size {
-			return string(linkBuf[:n]), nil
-		}
-		// Possible truncation, resize the buffer.
-		size *= 2
-	}
-}
diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
deleted file mode 100644
index 809a579cbdb..00000000000
--- a/e2e/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
+++ /dev/null
@@ -1,452 +0,0 @@
-//go:build linux
-
-// Copyright (C) 2024 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package securejoin
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"runtime"
-	"strconv"
-
-	"golang.org/x/sys/unix"
-)
-
-func fstat(f *os.File) (unix.Stat_t, error) {
-	var stat unix.Stat_t
-	if err := unix.Fstat(int(f.Fd()), &stat); err != nil {
-		return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err}
-	}
-	return stat, nil
-}
-
-func fstatfs(f *os.File) (unix.Statfs_t, error) {
-	var statfs unix.Statfs_t
-	if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil {
-		return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err}
-	}
-	return statfs, nil
-}
-
-// The kernel guarantees that the root inode of a procfs mount has an
-// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO.
-const (
-	procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC
-	procRootIno    = 1      // PROC_ROOT_INO
-)
-
-func verifyProcRoot(procRoot *os.File) error {
-	if statfs, err := fstatfs(procRoot); err != nil {
-		return err
-	} else if statfs.Type != procSuperMagic {
-		return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
-	}
-	if stat, err := fstat(procRoot); err != nil {
-		return err
-	} else if stat.Ino != procRootIno {
-		return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino)
-	}
-	return nil
-}
-
-var hasNewMountApi = sync_OnceValue(func() bool {
-	// All of the pieces of the new mount API we use (fsopen, fsconfig,
-	// fsmount, open_tree) were added together in Linux 5.1[1,2], so we can
-	// just check for one of the syscalls and the others should also be
-	// available.
-	//
-	// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
-	// This is equivalent to openat(2), but tells us if open_tree is
-	// available (and thus all of the other basic new mount API syscalls).
-	// open_tree(2) is the most light-weight syscall to test here.
-	//
-	// [1]: merge commit 400913252d09
-	// [2]:
-	fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
-	if err != nil {
-		return false
-	}
-	_ = unix.Close(fd)
-	return true
-})
-
-func fsopen(fsName string, flags int) (*os.File, error) {
-	// Make sure we always set O_CLOEXEC.
- flags |= unix.FSOPEN_CLOEXEC - fd, err := unix.Fsopen(fsName, flags) - if err != nil { - return nil, os.NewSyscallError("fsopen "+fsName, err) - } - return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil -} - -func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSMOUNT_CLOEXEC - fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) - if err != nil { - return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) - } - return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil -} - -func newPrivateProcMount() (*os.File, error) { - procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC) - if err != nil { - return nil, err - } - defer procfsCtx.Close() - - // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors. - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") - - // Get an actual handle. - if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { - return nil, os.NewSyscallError("fsconfig create procfs", err) - } - return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) -} - -func openTree(dir *os.File, path string, flags uint) (*os.File, error) { - dirFd := -int(unix.EBADF) - dirName := "." - if dir != nil { - dirFd = int(dir.Fd()) - dirName = dir.Name() - } - // Make sure we always set O_CLOEXEC. - flags |= unix.OPEN_TREE_CLOEXEC - fd, err := unix.OpenTree(dirFd, path, flags) - if err != nil { - return nil, &os.PathError{Op: "open_tree", Path: path, Err: err} - } - return os.NewFile(uintptr(fd), dirName+"/"+path), nil -} - -func clonePrivateProcMount() (_ *os.File, Err error) { - // Try to make a clone without using AT_RECURSIVE if we can. If this works, - // we can be sure there are no over-mounts and so if the root is valid then - // we're golden. Otherwise, we have to deal with over-mounts. - procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) - if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { - procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) - } - if err != nil { - return nil, fmt.Errorf("creating a detached procfs clone: %w", err) - } - defer func() { - if Err != nil { - _ = procfsHandle.Close() - } - }() - if err := verifyProcRoot(procfsHandle); err != nil { - return nil, err - } - return procfsHandle, nil -} - -func privateProcRoot() (*os.File, error) { - if !hasNewMountApi() || hookForceGetProcRootUnsafe() { - return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) - } - // Try to create a new procfs mount from scratch if we can. This ensures we - // can get a procfs mount even if /proc is fake (for whatever reason). - procRoot, err := newPrivateProcMount() - if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { - // Try to clone /proc then... - procRoot, err = clonePrivateProcMount() - } - return procRoot, err -} - -func unsafeHostProcRoot() (_ *os.File, Err error) { - procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - defer func() { - if Err != nil { - _ = procRoot.Close() - } - }() - if err := verifyProcRoot(procRoot); err != nil { - return nil, err - } - return procRoot, nil -} - -func doGetProcRoot() (*os.File, error) { - procRoot, err := privateProcRoot() - if err != nil { - // Fall back to using a /proc handle if making a private mount failed. 
-		// If we have openat2, at least we can avoid some kinds of over-mount
-		// attacks, but without openat2 there's not much we can do.
-		procRoot, err = unsafeHostProcRoot()
-	}
-	return procRoot, err
-}
-
-var getProcRoot = sync_OnceValues(func() (*os.File, error) {
-	return doGetProcRoot()
-})
-
-var hasProcThreadSelf = sync_OnceValue(func() bool {
-	return unix.Access("/proc/thread-self/", unix.F_OK) == nil
-})
-
-var errUnsafeProcfs = errors.New("unsafe procfs detected")
-
-type procThreadSelfCloser func()
-
-// procThreadSelf returns a handle to /proc/thread-self/<subpath> (or an
-// equivalent handle on older kernels where /proc/thread-self doesn't exist).
-// Once finished with the handle, you must call the returned closer function
-// (runtime.UnlockOSThread). You must not pass the returned *os.File to other
-// Go threads or use the handle after calling the closer.
-//
-// This is similar to ProcThreadSelf from runc, but with extra hardening
-// applied and using *os.File.
-func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) {
-	// We need to lock our thread until the caller is done with the handle
-	// because between getting the handle and using it we could get interrupted
-	// by the Go runtime and hit the case where the underlying thread is
-	// swapped out and the original thread is killed, resulting in
-	// pull-your-hair-out-hard-to-debug issues in the caller.
-	runtime.LockOSThread()
-	defer func() {
-		if Err != nil {
-			runtime.UnlockOSThread()
-		}
-	}()
-
-	// Figure out what prefix we want to use.
-	threadSelf := "thread-self/"
-	if !hasProcThreadSelf() || hookForceProcSelfTask() {
-		// Pre-3.17 kernels don't have /proc/thread-self, so do it manually.
-		threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/"
-		if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() {
-			// In this case, we are running in a pid namespace that doesn't
-			// match the /proc mount we have. This can happen inside runc.
-			//
-			// Unfortunately, there is no nice way to get the correct TID to
-			// use here because of the age of the kernel, so we have to just
-			// use /proc/self and hope that it works.
-			threadSelf = "self/"
-		}
-	}
-
-	// Grab the handle.
-	var (
-		handle *os.File
-		err    error
-	)
-	if hasOpenat2() {
-		// We prefer being able to use RESOLVE_NO_XDEV if we can, to be
-		// absolutely sure we are operating on a clean /proc handle that
-		// doesn't have any cheeky overmounts that could trick us (including
-		// symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
-		// strictly needed, but just use it since we have it.
-		//
-		// NOTE: /proc/self is technically a magic-link (the contents of the
-		// symlink are generated dynamically), but it doesn't use
-		// nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it.
-		//
-		// NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses
-		// procSelfFdReadlink to clean up the returned f.Name() if we use
-		// RESOLVE_IN_ROOT (which would lead to an infinite recursion).
-		handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{
-			Flags:   unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC,
-			Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
-		})
-		if err != nil {
-			// TODO: Once we bump the minimum Go version to 1.20, we can use
-			// multiple %w verbs for this wrapping. For now we need to use a
-			// compatibility shim for older Go versions.
-			//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
-			return nil, nil, wrapBaseError(err, errUnsafeProcfs)
-		}
-	} else {
-		handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
-		if err != nil {
-			// TODO: Once we bump the minimum Go version to 1.20, we can use
-			// multiple %w verbs for this wrapping. For now we need to use a
-			// compatibility shim for older Go versions.
-			//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
-			return nil, nil, wrapBaseError(err, errUnsafeProcfs)
-		}
-		defer func() {
-			if Err != nil {
-				_ = handle.Close()
-			}
-		}()
-		// We can't detect bind-mounts of different parts of procfs on top of
-		// /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we
-		// aren't on the wrong filesystem here.
-		if statfs, err := fstatfs(handle); err != nil {
-			return nil, nil, err
-		} else if statfs.Type != procSuperMagic {
-			return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
-		}
-	}
-	return handle, runtime.UnlockOSThread, nil
-}
-
-// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to
-// avoid bumping the requirement for a single constant we can just define it
-// ourselves.
-const STATX_MNT_ID_UNIQUE = 0x4000
-
-var hasStatxMountId = sync_OnceValue(func() bool {
-	var (
-		stx unix.Statx_t
-		// We don't care which mount ID we get. The kernel will give us the
-		// unique one if it is supported.
-		wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
-	)
-	err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
-	return err == nil && stx.Mask&wantStxMask != 0
-})
-
-func getMountId(dir *os.File, path string) (uint64, error) {
-	// If we don't have statx(STATX_MNT_ID*) support, we can't do anything.
-	if !hasStatxMountId() {
-		return 0, nil
-	}
-
-	var (
-		stx unix.Statx_t
-		// We don't care which mount ID we get. The kernel will give us the
-		// unique one if it is supported.
-		wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
-	)
-
-	err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx)
-	if stx.Mask&wantStxMask == 0 {
-		// It's not a kernel limitation, for some reason we couldn't get a
-		// mount ID. Assume it's some kind of attack.
-		err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs)
-	}
-	if err != nil {
-		return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err}
-	}
-	return stx.Mnt_id, nil
-}
-
-func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error {
-	// Get the mntId of our procfs handle.
-	expectedMountId, err := getMountId(procRoot, "")
-	if err != nil {
-		return err
-	}
-	// Get the mntId of the target magic-link.
-	gotMountId, err := getMountId(dir, path)
-	if err != nil {
-		return err
-	}
-	// As long as the directory mount is alive, even with wrapping mount IDs,
-	// we would expect to see a different mount ID here. (Of course, if we're
-	// using unsafeHostProcRoot() then an attacker could change this after we
-	// did this check.)
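-	// A mismatch here can only happen if something is mounted on top of the
-	// magic-link, hiding the real link target behind the overmount.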
-	if expectedMountId != gotMountId {
-		return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId)
-	}
-	return nil
-}
-
-func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) {
-	fdPath := fmt.Sprintf("fd/%d", fd)
-	procFdLink, closer, err := procThreadSelf(procRoot, fdPath)
-	if err != nil {
-		return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err)
-	}
-	defer procFdLink.Close()
-	defer closer()
-
-	// Try to detect if there is a mount on top of the magic-link. Since we
-	// use the handle directly rather than the path, this should be safe in
-	// general (a mount on top of the path afterwards would not affect the
-	// handle itself) and will definitely be safe if we are using
-	// privateProcRoot() (at least since Linux 5.12[1], when anonymous mount
-	// namespaces were completely isolated from external mounts including
-	// mount propagation events).
-	//
-	// [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
-	// onto targets that reside on shared mounts").
-	if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil {
-		return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err)
-	}
-
-	// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit
-	// 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty
-	// relative pathnames").
-	return readlinkatFile(procFdLink, "")
-}
-
-func rawProcSelfFdReadlink(fd int) (string, error) {
-	procRoot, err := getProcRoot()
-	if err != nil {
-		return "", err
-	}
-	return doRawProcSelfFdReadlink(procRoot, fd)
-}
-
-func procSelfFdReadlink(f *os.File) (string, error) {
-	return rawProcSelfFdReadlink(int(f.Fd()))
-}
-
-var (
-	errPossibleBreakout = errors.New("possible breakout detected")
-	errInvalidDirectory = errors.New("wandered into deleted directory")
-	errDeletedInode     = errors.New("cannot verify path of deleted inode")
-)
-
-func isDeadInode(file *os.File) error {
-	// If the nlink of a file drops to 0, there is an attacker deleting
-	// directories during our walk, which could result in weird /proc values.
-	// It's better to error out in this case.
-	stat, err := fstat(file)
-	if err != nil {
-		return fmt.Errorf("check for dead inode: %w", err)
-	}
-	if stat.Nlink == 0 {
-		err := errDeletedInode
-		if stat.Mode&unix.S_IFMT == unix.S_IFDIR {
-			err = errInvalidDirectory
-		}
-		return fmt.Errorf("%w %q", err, file.Name())
-	}
-	return nil
-}
-
-func checkProcSelfFdPath(path string, file *os.File) error {
-	if err := isDeadInode(file); err != nil {
-		return err
-	}
-	actualPath, err := procSelfFdReadlink(file)
-	if err != nil {
-		return fmt.Errorf("get path of handle: %w", err)
-	}
-	if actualPath != path {
-		return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path)
-	}
-	return nil
-}
-
-// Test hooks used in the procfs tests to verify that the fallback logic works.
-// See testing_mocks_linux_test.go and procfs_linux_test.go for more details.
-var ( - hookForcePrivateProcRootOpenTree = hookDummyFile - hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile - hookForceGetProcRootUnsafe = hookDummy - - hookForceProcSelfTask = hookDummy - hookForceProcSelf = hookDummy -) - -func hookDummy() bool { return false } -func hookDummyFile(_ *os.File) bool { return false } diff --git a/e2e/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/e2e/vendor/github.com/cyphar/filepath-securejoin/vfs.go deleted file mode 100644 index 36373f8c517..00000000000 --- a/e2e/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import "os" - -// In future this should be moved into a separate package, because now there -// are several projects (umoci and go-mtree) that are using this sort of -// interface. - -// VFS is the minimal interface necessary to use [SecureJoinVFS]. A nil VFS is -// equivalent to using the standard [os].* family of functions. This is mainly -// used for the purposes of mock testing, but also can be used to otherwise use -// [SecureJoinVFS] with VFS-like system. -type VFS interface { - // Lstat returns an [os.FileInfo] describing the named file. If the - // file is a symbolic link, the returned [os.FileInfo] describes the - // symbolic link. Lstat makes no attempt to follow the link. - // The semantics are identical to [os.Lstat]. - Lstat(name string) (os.FileInfo, error) - - // Readlink returns the destination of the named symbolic link. - // The semantics are identical to [os.Readlink]. - Readlink(name string) (string, error) -} - -// osVFS is the "nil" VFS, in that it just passes everything through to the os -// module. -type osVFS struct{} - -func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } - -func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/e2e/vendor/github.com/docker/go-units/CONTRIBUTING.md b/e2e/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784ec..00000000000 --- a/e2e/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/e2e/vendor/github.com/docker/go-units/LICENSE b/e2e/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc316..00000000000 --- a/e2e/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/github.com/docker/go-units/MAINTAINERS b/e2e/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c74110..00000000000 --- a/e2e/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "akihiro.suda.cz@hco.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/e2e/vendor/github.com/docker/go-units/README.md b/e2e/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e1345..00000000000 --- a/e2e/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/e2e/vendor/github.com/docker/go-units/circle.yml b/e2e/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index af9d6055293..00000000000 --- a/e2e/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get golang.org/x/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/e2e/vendor/github.com/docker/go-units/duration.go b/e2e/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index 48dd8744d43..00000000000 --- a/e2e/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/e2e/vendor/github.com/docker/go-units/size.go b/e2e/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index c245a89513f..00000000000 --- a/e2e/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,154 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[byte]int64 - -var ( - decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} - binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} -) - -var ( - decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} -) - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. 
-func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - // TODO: rewrite to use strings.Cut if there's a space - // once Go < 1.18 is deprecated. - sep := strings.LastIndexAny(sizeStr, "01234567890. ") - if sep == -1 { - // There should be at least a digit. - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - var num, sfx string - if sizeStr[sep] != ' ' { - num = sizeStr[:sep+1] - sfx = sizeStr[sep+1:] - } else { - // Omit the space separator. - num = sizeStr[:sep] - sfx = sizeStr[sep+1:] - } - - size, err := strconv.ParseFloat(num, 64) - if err != nil { - return -1, err - } - // Backward compatibility: reject negative sizes. - if size < 0 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - if len(sfx) == 0 { - return int64(size), nil - } - - // Process the suffix. - - if len(sfx) > 3 { // Too long. - goto badSuffix - } - sfx = strings.ToLower(sfx) - // Trivial case: b suffix. - if sfx[0] == 'b' { - if len(sfx) > 1 { // no extra characters allowed after b. - goto badSuffix - } - return int64(size), nil - } - // A suffix from the map. - if mul, ok := uMap[sfx[0]]; ok { - size *= float64(mul) - } else { - goto badSuffix - } - - // The suffix may have extra "b" or "ib" (e.g. KiB or MB). - switch { - case len(sfx) == 2 && sfx[1] != 'b': - goto badSuffix - case len(sfx) == 3 && sfx[1:] != "ib": - goto badSuffix - } - - return int64(size), nil - -badSuffix: - return -1, fmt.Errorf("invalid suffix: '%s'", sfx) -} diff --git a/e2e/vendor/github.com/docker/go-units/ulimit.go b/e2e/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400cc82..00000000000 --- a/e2e/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/e2e/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/e2e/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 92b78048e23..6f24dfff562 100644 --- a/e2e/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/e2e/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,8 @@ # Change history of go-restful +## [v3.12.2] - 2025-02-21 + +- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) ## [v3.12.1] - 2024-05-28 @@ -18,7 +21,7 @@ - fix by restoring custom JSON handler functions (Mike Beaumont #540) -## [v3.12.0] - 2023-08-19 +## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
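The v3.12.2 entry above ("allow empty payloads in post,put,patch") is what the jsr311.go hunk below implements: the router no longer answers 415 Unsupported Media Type just because a POST, PUT, or PATCH request carries no body; the route is matched and the handler decides what an empty payload means. A minimal sketch of a handler written against that behavior (the `item` type and its default value are illustrative, not part of this patch):

	package main

	import (
		"log"
		"net/http"

		restful "github.com/emicklei/go-restful/v3"
	)

	type item struct {
		Name string `json:"name"`
	}

	func main() {
		ws := new(restful.WebService)
		ws.Path("/items").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
		ws.Route(ws.POST("").To(func(req *restful.Request, resp *restful.Response) {
			it := new(item)
			// An empty body makes ReadEntity fail; fall back to defaults
			// instead of erroring, since routing now reaches the handler.
			if err := req.ReadEntity(it); err != nil {
				it.Name = "default"
			}
			resp.WriteHeaderAndEntity(http.StatusCreated, it)
		}))
		restful.Add(ws)
		log.Fatal(http.ListenAndServe(":8080", nil))
	}
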
diff --git a/e2e/vendor/github.com/emicklei/go-restful/v3/README.md b/e2e/vendor/github.com/emicklei/go-restful/v3/README.md index 7234604e47b..3fb40d19808 100644 --- a/e2e/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/e2e/vendor/github.com/emicklei/go-restful/v3/README.md @@ -3,7 +3,7 @@ go-restful package for building REST-style Web Services using Google Go [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) diff --git a/e2e/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/e2e/vendor/github.com/emicklei/go-restful/v3/jsr311.go index a9b3faaa81f..7f04bd90533 100644 --- a/e2e/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/e2e/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma return params } -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { candidates := make([]*Route, 0, 8) for i, each := range routes { @@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) } - if httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept @@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) 
} - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && (length == "" || length == "0") { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } return nil, NewError( http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", "))) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/e2e/vendor/github.com/emicklei/go-restful/v3/route.go b/e2e/vendor/github.com/emicklei/go-restful/v3/route.go index 306c44be779..a2056e2acbb 100644 --- a/e2e/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/e2e/vendor/github.com/emicklei/go-restful/v3/route.go @@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool { } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). +// If the route does not specify Consumes then return true (*/*). +// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE. func (r Route) matchesContentType(mimeTypes string) bool { if len(r.Consumes) == 0 { diff --git a/e2e/vendor/github.com/euank/go-kmsg-parser/kmsgparser/kmsgparser.go b/e2e/vendor/github.com/euank/go-kmsg-parser/kmsgparser/kmsgparser.go deleted file mode 100644 index df160cad57b..00000000000 --- a/e2e/vendor/github.com/euank/go-kmsg-parser/kmsgparser/kmsgparser.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2016 Euan Kemp - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package kmsgparser implements a parser for the Linux `/dev/kmsg` format. -// More information about this format may be found here: -// https://www.kernel.org/doc/Documentation/ABI/testing/dev-kmsg -// Some parts of it are slightly inspired by rsyslog's contrib module: -// https://github.com/rsyslog/rsyslog/blob/v8.22.0/contrib/imkmsg/kmsg.c -package kmsgparser - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - "syscall" - "time" -) - -// Parser is a parser for the kernel ring buffer found at /dev/kmsg -type Parser interface { - // SeekEnd moves the parser to the end of the kmsg queue. - SeekEnd() error - // Parse provides a channel of messages read from the kernel ring buffer. - // When first called, it will read the existing ringbuffer, after which it will emit new messages as they occur. - Parse() <-chan Message - // SetLogger sets the logger that will be used to report malformed kernel - // ringbuffer lines or unexpected kmsg read errors. 
- SetLogger(Logger)
- // Close closes the underlying kmsg reader for this parser
- Close() error
-}
-
-// Message represents a given kmsg logline, including its timestamp (as
-// calculated based on offset from boot time), its possibly multi-line body,
-// and so on. More information about these messages may be found here:
-// https://www.kernel.org/doc/Documentation/ABI/testing/dev-kmsg
-type Message struct {
- Priority int
- SequenceNumber int
- Timestamp time.Time
- Message string
-}
-
-func NewParser() (Parser, error) {
- f, err := os.Open("/dev/kmsg")
- if err != nil {
- return nil, err
- }
-
- bootTime, err := getBootTime()
- if err != nil {
- return nil, err
- }
-
- return &parser{
- log: &StandardLogger{nil},
- kmsgReader: f,
- bootTime: bootTime,
- }, nil
-}
-
-type ReadSeekCloser interface {
- io.ReadCloser
- io.Seeker
-}
-
-type parser struct {
- log Logger
- kmsgReader ReadSeekCloser
- bootTime time.Time
-}
-
-func getBootTime() (time.Time, error) {
- var sysinfo syscall.Sysinfo_t
- err := syscall.Sysinfo(&sysinfo)
- if err != nil {
- return time.Time{}, fmt.Errorf("could not get boot time: %v", err)
- }
- // sysinfo only has seconds
- return time.Now().Add(-1 * (time.Duration(sysinfo.Uptime) * time.Second)), nil
-}
-
-func (p *parser) SetLogger(log Logger) {
- p.log = log
-}
-
-func (p *parser) Close() error {
- return p.kmsgReader.Close()
-}
-
-func (p *parser) SeekEnd() error {
- _, err := p.kmsgReader.Seek(0, os.SEEK_END)
- return err
-}
-
-// Parse will read from the provided reader and provide a channel of parsed
-// messages.
-// If the provided reader *is not* a proper Linux kmsg device, Parse might not
-// behave correctly since it relies on specific behavior of `/dev/kmsg`.
-//
-// A goroutine is created to process the provided reader. The goroutine will
-// exit when the passed-in reader is closed.
-func (p *parser) Parse() <-chan Message {
-
- output := make(chan Message, 1)
-
- go func() {
- defer close(output)
- msg := make([]byte, 8192)
- for {
- // Each read call gives us one full message.
- // https://www.kernel.org/doc/Documentation/ABI/testing/dev-kmsg
- n, err := p.kmsgReader.Read(msg)
- if err != nil {
- if err == syscall.EPIPE {
- p.log.Warningf("short read from kmsg; skipping")
- continue
- }
-
- if err == io.EOF {
- p.log.Infof("kmsg reader closed, shutting down")
- return
- }
-
- p.log.Errorf("error reading /dev/kmsg: %v", err)
- return
- }
-
- msgStr := string(msg[:n])
-
- message, err := p.parseMessage(msgStr)
- if err != nil {
- p.log.Warningf("unable to parse kmsg message %q: %v", msgStr, err)
- continue
- }
-
- output <- message
- }
- }()
-
- return output
-}
-
-func (p *parser) parseMessage(input string) (Message, error) {
- // Format:
- // PRIORITY,SEQUENCE_NUM,TIMESTAMP,-;MESSAGE
- parts := strings.SplitN(input, ";", 2)
- if len(parts) != 2 {
- return Message{}, fmt.Errorf("invalid kmsg; must contain a ';'")
- }
-
- metadata, message := parts[0], parts[1]
-
- metadataParts := strings.Split(metadata, ",")
- if len(metadataParts) < 3 {
- return Message{}, fmt.Errorf("invalid kmsg: must contain at least 3 ',' separated pieces at the start")
- }
-
- priority, sequence, timestamp := metadataParts[0], metadataParts[1], metadataParts[2]
-
- prioNum, err := strconv.Atoi(priority)
- if err != nil {
- return Message{}, fmt.Errorf("could not parse %q as priority: %v", priority, err)
- }
-
- sequenceNum, err := strconv.Atoi(sequence)
- if err != nil {
- return Message{}, fmt.Errorf("could not parse %q as sequence number: %v", sequence, err)
- }
-
- timestampUsFromBoot, err := strconv.ParseInt(timestamp, 10, 64)
- if err != nil {
- return Message{}, fmt.Errorf("could not parse %q as timestamp: %v", timestamp, err)
- }
- // timestamp is offset in microsecond from boottime.
- msgTime := p.bootTime.Add(time.Duration(timestampUsFromBoot) * time.Microsecond)
-
- return Message{
- Priority: prioNum,
- SequenceNumber: sequenceNum,
- Timestamp: msgTime,
- Message: message,
- }, nil
-}
diff --git a/e2e/vendor/github.com/euank/go-kmsg-parser/kmsgparser/log.go b/e2e/vendor/github.com/euank/go-kmsg-parser/kmsgparser/log.go
deleted file mode 100644
index 6ea2d96dfe1..00000000000
--- a/e2e/vendor/github.com/euank/go-kmsg-parser/kmsgparser/log.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-Copyright 2016 Euan Kemp
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kmsgparser
-
-import stdlog "log"
-
-// Logger is a glog-compatible logging interface.
-// The StandardLogger struct can be used to wrap a log.Logger from the golang
-// "log" package to create a standard logger fulfilling this interface as
-// well.
-type Logger interface {
- Warningf(string, ...interface{})
- Infof(string, ...interface{})
- Errorf(string, ...interface{})
-}
-
-// StandardLogger adapts the "log" package's Logger interface to be a Logger
-type StandardLogger struct {
- *stdlog.Logger
-}
-
-func (s *StandardLogger) Warningf(fmt string, args ...interface{}) {
- if s.Logger == nil {
- return
- }
- s.Logger.Printf("[WARNING] "+fmt, args...)
-}
-
-func (s *StandardLogger) Infof(fmt string, args ...interface{}) {
- if s.Logger == nil {
- return
- }
- s.Logger.Printf("[INFO] "+fmt, args...)
-}
-
-func (s *StandardLogger) Errorf(fmt string, args ...interface{}) {
- if s.Logger == nil {
- return
- }
- s.Logger.Printf("[ERROR] "+fmt, args...)
-}
diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/e2e/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
deleted file mode 100644
index ffc7b992b3c..00000000000
--- a/e2e/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-freebsd_task:
- name: 'FreeBSD'
- freebsd_instance:
- image_family: freebsd-13-2
- install_script:
- - pkg update -f
- - pkg install -y go
- test_script:
- # run tests as user "cirrus" instead of root
- - pw useradd cirrus -m
- - chown -R cirrus:cirrus .
- - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/.gitattributes b/e2e/vendor/github.com/fsnotify/fsnotify/.gitattributes
deleted file mode 100644
index 32f1001be0a..00000000000
--- a/e2e/vendor/github.com/fsnotify/fsnotify/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-go.sum linguist-generated
diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/.gitignore b/e2e/vendor/github.com/fsnotify/fsnotify/.gitignore
deleted file mode 100644
index 391cc076b12..00000000000
--- a/e2e/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-# go test -c output
-*.test
-*.test.exe
-
-# Output of go build ./cmd/fsnotify
-/fsnotify
-/fsnotify.exe
diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/.mailmap b/e2e/vendor/github.com/fsnotify/fsnotify/.mailmap
deleted file mode 100644
index a04f2907fed..00000000000
--- a/e2e/vendor/github.com/fsnotify/fsnotify/.mailmap
+++ /dev/null
@@ -1,2 +0,0 @@
-Chris Howey
-Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/e2e/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
deleted file mode 100644
index e0e57575496..00000000000
--- a/e2e/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ /dev/null
@@ -1,541 +0,0 @@
-# Changelog
-
-Unreleased
-----------
-Nothing yet.
-
-1.7.0 - 2023-10-22
-------------------
-This version of fsnotify needs Go 1.17.
-
-### Additions
-
-- illumos: add FEN backend to support illumos and Solaris. ([#371])
-
-- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
- in cases where you can't control the kernel buffer and receive a large number
- of events in bursts. ([#550], [#572])
-
-- all: add `AddWith()`, which is identical to `Add()` but allows passing
- options. ([#521])
-
-- windows: allow setting the ReadDirectoryChangesW() buffer size with
- `fsnotify.WithBufferSize()`; the default of 64K is the highest value that
- works on all platforms and is enough for most purposes, but in some cases a
- higher buffer is needed.
([#521])
-
-### Changes and fixes
-
-- inotify: remove watcher if a watched path is renamed ([#518])
-
- After a rename the reported name wasn't updated, or was even an empty string.
- Inotify doesn't provide any good facilities to update it, so just remove the
- watcher. This is already how it worked on kqueue and FEN.
-
- On Windows this does work, and remains working.
-
-- windows: don't listen for file attribute changes ([#520])
-
- File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
- with no way to see if they're a file write or attribute change, so would show
- up as a fsnotify.Write event. This is never useful, and could result in many
- spurious Write events.
-
-- windows: return `ErrEventOverflow` if the buffer is full ([#525])
-
- Before it would merely return "short read", making it hard to detect this
- error.
-
-- kqueue: make sure events for all files are delivered properly when removing a
- watched directory ([#526])
-
- Previously they would get sent with `""` (empty string) or `"."` as the path
- name.
-
-- kqueue: don't emit spurious Create events for symbolic links ([#524])
-
- The link would get resolved but kqueue would "forget" it already saw the link
- itself, resulting in a Create for every Write event for the directory.
-
-- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
-
-- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
- `backend_other.go`, making it easier to use on unsupported platforms such as
- WASM, AIX, etc. ([#528])
-
-- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
- Google AppEngine forbids usage of the unsafe package so the inotify backend
- won't compile there.
-
-[#371]: https://github.com/fsnotify/fsnotify/pull/371
-[#516]: https://github.com/fsnotify/fsnotify/pull/516
-[#518]: https://github.com/fsnotify/fsnotify/pull/518
-[#520]: https://github.com/fsnotify/fsnotify/pull/520
-[#521]: https://github.com/fsnotify/fsnotify/pull/521
-[#524]: https://github.com/fsnotify/fsnotify/pull/524
-[#525]: https://github.com/fsnotify/fsnotify/pull/525
-[#526]: https://github.com/fsnotify/fsnotify/pull/526
-[#528]: https://github.com/fsnotify/fsnotify/pull/528
-[#537]: https://github.com/fsnotify/fsnotify/pull/537
-[#550]: https://github.com/fsnotify/fsnotify/pull/550
-[#572]: https://github.com/fsnotify/fsnotify/pull/572
-
-1.6.0 - 2022-10-13
-------------------
-This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
-but not documented). It also increases the minimum Linux version to 2.6.32.
-
-### Additions
-
-- all: add `Event.Has()` and `Op.Has()` ([#477])
-
- This makes checking events a lot easier; for example:
-
-	if event.Op&Write == Write && !(event.Op&Remove == Remove) {
-	}
-
- Becomes:
-
-	if event.Has(Write) && !event.Has(Remove) {
-	}
-
-- all: add cmd/fsnotify ([#463])
-
- A command-line utility for testing and some examples.
-
-### Changes and fixes
-
-- inotify: don't ignore events for files that don't exist ([#260], [#470])
-
- Previously the inotify watcher would call `os.Lstat()` to check if a file
- still exists before emitting events.
-
- This was inconsistent with other platforms and resulted in inconsistent event
- reporting (e.g. when a file is quickly removed and re-created), and generally
- a source of confusion. It was added in 2013 to fix a memory leak that no
- longer exists.
- -- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's - not watched ([#460]) - -- inotify: replace epoll() with non-blocking inotify ([#434]) - - Non-blocking inotify was not generally available at the time this library was - written in 2014, but now it is. As a result, the minimum Linux version is - bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. - -- kqueue: don't check for events every 100ms ([#480]) - - The watcher would wake up every 100ms, even when there was nothing to do. Now - it waits until there is something to do. - -- macos: retry opening files on EINTR ([#475]) - -- kqueue: skip unreadable files ([#479]) - - kqueue requires a file descriptor for every file in a directory; this would - fail if a file was unreadable by the current user. Now these files are simply - skipped. - -- windows: fix renaming a watched directory if the parent is also watched ([#370]) - -- windows: increase buffer size from 4K to 64K ([#485]) - -- windows: close file handle on Remove() ([#288]) - -- kqueue: put pathname in the error if watching a file fails ([#471]) - -- inotify, windows: calling Close() more than once could race ([#465]) - -- kqueue: improve Close() performance ([#233]) - -- all: various documentation additions and clarifications. - -[#233]: https://github.com/fsnotify/fsnotify/pull/233 -[#260]: https://github.com/fsnotify/fsnotify/pull/260 -[#288]: https://github.com/fsnotify/fsnotify/pull/288 -[#370]: https://github.com/fsnotify/fsnotify/pull/370 -[#434]: https://github.com/fsnotify/fsnotify/pull/434 -[#460]: https://github.com/fsnotify/fsnotify/pull/460 -[#463]: https://github.com/fsnotify/fsnotify/pull/463 -[#465]: https://github.com/fsnotify/fsnotify/pull/465 -[#470]: https://github.com/fsnotify/fsnotify/pull/470 -[#471]: https://github.com/fsnotify/fsnotify/pull/471 -[#475]: https://github.com/fsnotify/fsnotify/pull/475 -[#477]: https://github.com/fsnotify/fsnotify/pull/477 -[#479]: https://github.com/fsnotify/fsnotify/pull/479 -[#480]: https://github.com/fsnotify/fsnotify/pull/480 -[#485]: https://github.com/fsnotify/fsnotify/pull/485 - -## [1.5.4] - 2022-04-25 - -* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) -* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) -* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) - -## [1.5.3] - 2022-04-22 - -* This version is retracted. 
An incorrect branch was published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
-
-## [1.5.2] - 2022-04-21
-
-* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
-* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
-* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
-* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
-* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
-
-## [1.5.1] - 2021-08-24
-
-* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
-
-## [1.5.0] - 2021-08-20
-
-* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
-* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
-* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
-* CI: Use GitHub Actions for CI and cover go 1.12-1.17
- [#378](https://github.com/fsnotify/fsnotify/pull/378)
- [#381](https://github.com/fsnotify/fsnotify/pull/381)
- [#385](https://github.com/fsnotify/fsnotify/pull/385)
-* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
-
-## [1.4.9] - 2020-03-11
-
-* Move example usage to the readme #329. This may resolve #328.
-
-## [1.4.8] - 2020-03-10
-
-* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
-* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
-* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
-* CI: Less verbosity (@nathany #267)
-* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
-* Tests: Check if channels are closed in the example (@alexeykazakov #244)
-* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
-* CI: Add windows to travis matrix (@cpuguy83 #284)
-* Docs: Remove appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
-* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
-* Linux: open files with close-on-exec (@linxiulei #273)
-* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
-* Project: Add go.mod (@nathany #309)
-* Project: Revise editor config (@nathany #309)
-* Project: Update copyright for 2019 (@nathany #309)
-* CI: Drop go1.8 from CI matrix (@nathany #309)
-* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
-
-## [1.4.7] - 2018-01-09
-
-* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
-* Tests: Fix missing verb on format string (thanks @rchiossi)
-* Linux: Fix deadlock in Remove (thanks @aarondl)
-* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
-* Docs: Moved FAQ into the README (thanks @vahe)
-* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
-* Docs: replace references to OS X with macOS
-
-## [1.4.2] - 2016-10-10
-
-* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec
[#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
-
-## [1.4.1] - 2016-10-04
-
-* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
-
-## [1.4.0] - 2016-10-01
-
-* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
-
-## [1.3.1] - 2016-06-28
-
-* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
-
-## [1.3.0] - 2016-04-19
-
-* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
-
-## [1.2.10] - 2016-03-02
-
-* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
-
-## [1.2.9] - 2016-01-13
-
-* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
-
-## [1.2.8] - 2015-12-17
-
-* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
-* inotify: fix race in test
-* enable race detection for continuous integration (Linux, Mac, Windows)
-
-## [1.2.5] - 2015-10-17
-
-* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
-* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
-* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
-* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
-
-## [1.2.1] - 2015-10-14
-
-* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
-
-## [1.2.0] - 2015-02-08
-
-* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
-* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
-* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
-
-## [1.1.1] - 2015-02-05
-
-* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
-
-## [1.1.0] - 2014-12-12
-
-* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
- * add low-level functions
- * only need to store flags on directories
- * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
- * done can be an unbuffered channel
- * remove calls to os.NewSyscallError
-* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
-* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
-* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-
-## [1.0.4] - 2014-09-07
-
-* kqueue: add dragonfly to the build tags.
-* Rename source code files, rearrange code so exported APIs are at the top.
-* Add done channel to example code.
[#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## [1.0.3] - 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## [1.0.2] - 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## [1.0.0] - 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## [0.9.3] - 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [0.9.2] - 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## [0.9.1] - 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## [0.9.0] - 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
-
-## [0.8.12] - 2013-11-13
-
-* [API] Remove FD_SET and friends from Linux adapter
-
-## [0.8.11] - 2013-11-02
-
-* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
-
-## [0.8.10] - 2013-10-19
-
-* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
-* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
-* [Doc] specify OS-specific limits in README (thanks @debrando)
-
-## [0.8.9] - 2013-09-08
-
-* [Doc] Contributing (thanks @nathany)
-* [Doc] update package path in example code [#63][] (thanks @paulhammond)
-* [Doc] GoCI badge in README (Linux only) [#60][]
-* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
-
-## [0.8.8] - 2013-06-17
-
-* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-
-## [0.8.7] - 2013-06-03
-
-* [API] Make syscall flags internal
-* [Fix] inotify: ignore event changes
-* [Fix] race in symlink test [#45][] (reported by @srid)
-* [Fix] tests on Windows
-* lower case error messages
-
-## [0.8.6] - 2013-05-23
-
-* kqueue: Use EVT_ONLY flag on Darwin
-* [Doc] Update README with full example
-
-## [0.8.5] - 2013-05-09
-
-* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-
-## [0.8.4] - 2013-04-07
-
-* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-
-## [0.8.3] - 2013-03-13
-
-* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
-* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-
-## [0.8.2] - 2013-02-07
-
-* [Doc] add Authors
-* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-
-## [0.8.1] - 2013-01-09
-
-* [Fix] Windows path separators
-* [Doc] BSD License
-
-## [0.8.0] - 2012-11-09
-
-* kqueue: directory watching improvements (thanks @vmirage)
-* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
-* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-
-## [0.7.4] - 2012-10-09
-
-* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
-* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
-* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
-* [Fix] kqueue: modify after recreation of file
-
-## [0.7.3] - 2012-09-27
-
-* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
-* [Fix] kqueue: no longer get duplicate CREATE events
-
-## [0.7.2] - 2012-09-01
-
-* kqueue: events for created directories
-
-## [0.7.1] - 2012-07-14
-
-* [Fix] for renaming files
-
-## [0.7.0] - 2012-07-02
-
-* [Feature] FSNotify flags
-* [Fix] inotify: Added file name back to event path
-
-## [0.6.0] - 2012-06-06
-
-* kqueue: watch files after directory created (thanks @tmc)
-
-## [0.5.1] - 2012-05-22
-
-* [Fix] inotify: remove all watches before Close()
-
-## [0.5.0] - 2012-05-03
-
-* [API] kqueue: return errors during watch instead of sending over channel
-* kqueue: match symlink behavior on Linux
-* inotify: add `DELETE_SELF` (requested by @taralx)
-* [Fix] kqueue: handle EINTR (reported by @robfig)
-* [Doc] Godoc example [#1][] (thanks @davecheney)
-
-## [0.4.0] - 2012-03-30
-
-* Go 1 released: build with go tool
-* [Feature] Windows support using winfsnotify
-* Windows does not have attribute change notifications
-* Roll attribute notifications into IsModify
-
-## [0.3.0] - 2012-02-19
-
-* kqueue: add files when watching a directory
-
-## [0.2.0] - 2011-12-30
-
-* update to latest Go weekly code
-
-## [0.1.0] - 2011-10-19
-
-* kqueue: add watch on file creation to match inotify
-* kqueue: create file event
-* inotify: ignore `IN_IGNORED` events
-* event String()
-* linux: common FileEvent functions
-* initial commit
-
-[#79]: https://github.com/howeyc/fsnotify/pull/79
-[#77]: https://github.com/howeyc/fsnotify/pull/77
-[#72]: https://github.com/howeyc/fsnotify/issues/72
-[#71]: https://github.com/howeyc/fsnotify/issues/71
-[#70]: https://github.com/howeyc/fsnotify/issues/70
-[#63]: https://github.com/howeyc/fsnotify/issues/63
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#60]: https://github.com/howeyc/fsnotify/issues/60
-[#59]: https://github.com/howeyc/fsnotify/issues/59
-[#49]: https://github.com/howeyc/fsnotify/issues/49
-[#45]: https://github.com/howeyc/fsnotify/issues/45
-[#40]: https://github.com/howeyc/fsnotify/issues/40
-[#36]: https://github.com/howeyc/fsnotify/issues/36
-[#33]: https://github.com/howeyc/fsnotify/issues/33
-[#29]: https://github.com/howeyc/fsnotify/issues/29
-[#25]: https://github.com/howeyc/fsnotify/issues/25
-[#24]: https://github.com/howeyc/fsnotify/issues/24
-[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/e2e/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
deleted file mode 100644
index ea379759d51..00000000000
--- a/e2e/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ /dev/null
@@ -1,26 +0,0 @@
-Thank you for your interest in contributing to fsnotify! We try to review and
-merge PRs in a reasonable timeframe, but please be aware that:
-
-- To avoid "wasted" work, please discuss changes on the issue tracker first. You
-  can just send PRs, but they may end up being rejected for one reason or the
-  other.
-
-- fsnotify is a cross-platform library, and changes must work reasonably well on
-  all supported platforms.
-
-- Changes will need to be compatible; old code should still compile, and the
-  runtime behaviour can't change in ways that are likely to lead to problems for
-  users.
-
-Testing
--------
-Just `go test ./...` runs all the tests; the CI runs this on all supported
-platforms. Testing different platforms locally can be done with something like
-[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
-
-Use the `-short` flag to make the "stress test" run faster.
-
-
-[goon]: https://github.com/arp242/goon
-[Vagrant]: https://www.vagrantup.com/
-[integration_test.go]: /integration_test.go
diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/LICENSE b/e2e/vendor/github.com/fsnotify/fsnotify/LICENSE
deleted file mode 100644
index fb03ade7506..00000000000
--- a/e2e/vendor/github.com/fsnotify/fsnotify/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright © 2012 The Go Authors. All rights reserved.
-Copyright © fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this
-  list of conditions and the following disclaimer in the documentation and/or
-  other materials provided with the distribution.
-* Neither the name of Google Inc.
nor the names of its contributors may be used - to endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/README.md b/e2e/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index e480733d16c..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,184 +0,0 @@ -fsnotify is a Go library to provide cross-platform filesystem notifications on -Windows, Linux, macOS, BSD, and illumos. - -Go 1.17 or newer is required; the full documentation is at -https://pkg.go.dev/github.com/fsnotify/fsnotify - ---- - -Platform support: - -| Backend | OS | Status | -| :-------------------- | :--------- | :------------------------------------------------------------------------ | -| inotify | Linux | Supported | -| kqueue | BSD, macOS | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FEN | illumos | Supported | -| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | -| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | -| USN Journals | Windows | [Needs support in x/sys/windows][usn] | -| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | - -Linux and illumos should include Android and Solaris, but these are currently -untested. - -[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 -[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 - -Usage ------ -A basic example: - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - // Create new watcher. - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - // Start listening for events. - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Has(fsnotify.Write) { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - // Add a path. - err = watcher.Add("/tmp") - if err != nil { - log.Fatal(err) - } - - // Block main goroutine forever. - <-make(chan struct{}) -} -``` - -Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be -run with: - - % go run ./cmd/fsnotify - -Further detailed documentation can be found in godoc: -https://pkg.go.dev/github.com/fsnotify/fsnotify - -FAQ ---- -### Will a file still be watched when it's moved to another directory? 
-No, not unless you are watching the location it was moved to.
-
-### Are subdirectories watched?
-No, you must add watches for any directory you want to watch (a recursive
-watcher is on the roadmap: [#18]).
-
-[#18]: https://github.com/fsnotify/fsnotify/issues/18
-
-### Do I have to watch the Error and Event channels in a goroutine?
-Yes. You can read both channels in the same goroutine using `select` (you don't
-need a separate goroutine for both channels; see the example).
-
-### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
-fsnotify requires support from the underlying OS to work. The current NFS and SMB
-protocols do not provide network level support for file notifications, and
-neither do the /proc and /sys virtual filesystems.
-
-This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
-
-[#9]: https://github.com/fsnotify/fsnotify/issues/9
-
-### Why do I get many Chmod events?
-Some programs may generate a lot of attribute changes; for example Spotlight on
-macOS, anti-virus programs, backup applications, and some others are known to do
-this. As a rule, it's typically best to ignore Chmod events. They're often not
-useful, and tend to cause problems.
-
-Spotlight indexing on macOS can result in multiple events (see [#15]). A
-temporary workaround is to add your folder(s) to the *Spotlight Privacy
-settings* until we have a native FSEvents implementation (see [#11]).
-
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#15]: https://github.com/fsnotify/fsnotify/issues/15
-
-### Watching a file doesn't work well
-Watching individual files (rather than directories) is generally not recommended
-as many programs (especially editors) update files atomically: they write to a
-temporary file which is then moved to the destination, overwriting the original
-(or some variant thereof). The watcher on the original file is now lost, as that
-file no longer exists.
-
-The upshot of this is that a power failure or crash won't leave a half-written
-file.
-
-Watch the parent directory and use `Event.Name` to filter out files you're not
-interested in. There is an example of this in `cmd/fsnotify/file.go`, and a
-short sketch of the pattern follows the Linux notes below.
-
-Platform-specific notes
------------------------
-### Linux
-When a file is removed a REMOVE event won't be emitted until all file
-descriptors are closed; it will emit a CHMOD instead:
-
-    fp := os.Open("file")
-    os.Remove("file")   // CHMOD
-    fp.Close()          // REMOVE
-
-This is the event that inotify sends, so not much can be changed about this.
-
-The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
-the number of watches per user, and `fs.inotify.max_user_instances` specifies
-the maximum number of inotify instances per user. Every Watcher you create is an
-"instance", and every path you add is a "watch".
-
-These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
-`/proc/sys/fs/inotify/max_user_instances`.
-
-To increase them you can use `sysctl` or write the value to the /proc file:
-
-    # The default values on Linux 5.18
-    sysctl fs.inotify.max_user_watches=124983
-    sysctl fs.inotify.max_user_instances=128
-
-To make the changes persist on reboot edit `/etc/sysctl.conf` or
-`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
-distro's documentation):
-
-    fs.inotify.max_user_watches=124983
-    fs.inotify.max_user_instances=128
-
-Reaching the limit will result in a "no space left on device" or "too many open
-files" error.
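A minimal sketch of the parent-directory pattern referenced above. The `/tmp/app.conf` path and the `Write`/`Create` filter are illustrative assumptions, not part of the upstream README:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Hypothetical file of interest; watching its parent directory survives
	// editors that save via rename (temp file + move over the original).
	const target = "/tmp/app.conf"

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the directory, not the file itself.
	if err := watcher.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Filter by Event.Name so only the file we care about is reported.
			if event.Name == target && (event.Has(fsnotify.Write) || event.Has(fsnotify.Create)) {
				log.Println("target changed:", event)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```

Filtering on `Event.Name` after watching `filepath.Dir(target)` keeps the watch alive across atomic saves, at the cost of receiving (and discarding) events for sibling files.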
- -### kqueue (macOS, all BSD systems) -kqueue requires opening a file descriptor for every file that's being watched; -so if you're watching a directory with five files then that's six file -descriptors. You will run in to your system's "max open files" limit faster on -these platforms. - -The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to -control the maximum number of open files. diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/e2e/vendor/github.com/fsnotify/fsnotify/backend_fen.go deleted file mode 100644 index 28497f1dd8e..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ /dev/null @@ -1,640 +0,0 @@ -//go:build solaris -// +build solaris - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - mu sync.Mutex - port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), - done: make(chan struct{}), - } - - var err error - w.port, err = unix.NewEventPort() - if err != nil { - return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) - } - - go w.readEvents() - return w, nil -} - -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { - select { - case w.Events <- Event{Name: name, Op: op}: - return true - case <-w.done: - return false - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { - return nil - } - close(w.done) - return w.port.Close() -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). 
-func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - if w.port.PathIsWatched(name) { - return nil - } - - _ = getOptions(opts...) - - // Currently we resolve symlinks that were explicitly requested to be - // watched. Otherwise we would use LStat here. - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Associate all files in the directory. - if stat.IsDir() { - err := w.handleDirectory(name, stat, true, w.associateFile) - if err != nil { - return err - } - - w.mu.Lock() - w.dirs[name] = struct{}{} - w.mu.Unlock() - return nil - } - - err = w.associateFile(name, stat, true) - if err != nil { - return err - } - - w.mu.Lock() - w.watches[name] = struct{}{} - w.mu.Unlock() - return nil -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - if !w.port.PathIsWatched(name) { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - // The user has expressed an intent. Immediately remove this name from - // whichever watch list it might be in. If it's not in there the delete - // doesn't cause harm. - w.mu.Lock() - delete(w.watches, name) - delete(w.dirs, name) - w.mu.Unlock() - - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Remove associations for every file in the directory. - if stat.IsDir() { - err := w.handleDirectory(name, stat, false, w.dissociateFile) - if err != nil { - return err - } - return nil - } - - err = w.port.DissociatePath(name) - if err != nil { - return err - } - - return nil -} - -// readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { - // If this function returns, the watcher has been closed and we can close - // these channels - defer func() { - close(w.Errors) - close(w.Events) - }() - - pevents := make([]unix.PortEvent, 8) - for { - count, err := w.port.Get(pevents, 1, nil) - if err != nil && err != unix.ETIME { - // Interrupted system call (count should be 0) ignore and continue - if errors.Is(err, unix.EINTR) && count == 0 { - continue - } - // Get failed because we called w.Close() - if errors.Is(err, unix.EBADF) && w.isClosed() { - return - } - // There was an error not caused by calling w.Close() - if !w.sendError(err) { - return - } - } - - p := pevents[:count] - for _, pevent := range p { - if pevent.Source != unix.PORT_SOURCE_FILE { - // Event from unexpected source received; should never happen. - if !w.sendError(errors.New("Event from unexpected source received")) { - return - } - continue - } - - err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } - } - } - } -} - -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { - files, err := os.ReadDir(path) - if err != nil { - return err - } - - // Handle all children of the directory. - for _, entry := range files { - finfo, err := entry.Info() - if err != nil { - return err - } - err = handler(filepath.Join(path, finfo.Name()), finfo, false) - if err != nil { - return err - } - } - - // And finally handle the directory itself. 
-	return handler(path, stat, follow)
-}
-
-// handleEvent might need to emit more than one fsnotify event if the events
-// bitmap matches more than one event type (e.g. the file was both modified and
-// had the attributes changed between when the association was created and
-// when the event was returned)
-func (w *Watcher) handleEvent(event *unix.PortEvent) error {
-	var (
-		events     = event.Events
-		path       = event.Path
-		fmode      = event.Cookie.(os.FileMode)
-		reRegister = true
-	)
-
-	w.mu.Lock()
-	_, watchedDir := w.dirs[path]
-	_, watchedPath := w.watches[path]
-	w.mu.Unlock()
-	isWatched := watchedDir || watchedPath
-
-	if events&unix.FILE_DELETE != 0 {
-		if !w.sendEvent(path, Remove) {
-			return nil
-		}
-		reRegister = false
-	}
-	if events&unix.FILE_RENAME_FROM != 0 {
-		if !w.sendEvent(path, Rename) {
-			return nil
-		}
-		// Don't keep watching the new file name
-		reRegister = false
-	}
-	if events&unix.FILE_RENAME_TO != 0 {
-		// We don't report a Rename event for this case, because Rename events
-		// are interpreted as referring to the _old_ name of the file, and in
-		// this case the event would refer to the new name of the file. This
-		// type of rename event is not supported by fsnotify.
-
-		// inotify reports a Remove event in this case, so we simulate this
-		// here.
-		if !w.sendEvent(path, Remove) {
-			return nil
-		}
-		// Don't keep watching the file that was removed
-		reRegister = false
-	}
-
-	// The file is gone, nothing left to do.
-	if !reRegister {
-		if watchedDir {
-			w.mu.Lock()
-			delete(w.dirs, path)
-			w.mu.Unlock()
-		}
-		if watchedPath {
-			w.mu.Lock()
-			delete(w.watches, path)
-			w.mu.Unlock()
-		}
-		return nil
-	}
-
-	// If we didn't get a deletion the file still exists and we're going to have
-	// to watch it again. Let's Stat it now so that we can compare permissions
-	// and have what we need to continue watching the file.
-
-	stat, err := os.Lstat(path)
-	if err != nil {
-		// This is unexpected, but we should still emit an event. This happens
-		// most often on "rm -r" of a subdirectory inside a watched directory. We
-		// get a modify event of something happening inside, but by the time we
-		// get here, the subdirectory is already gone. Clearly we were watching
-		// this path but now it is gone. Let's tell the user that it was
-		// removed.
-		if !w.sendEvent(path, Remove) {
-			return nil
-		}
-		// Suppress extra write events on removed directories; they are not
-		// informative and can be confusing.
-		return nil
-	}
-
-	// Resolve symlinks that were explicitly watched, as we would have at Add()
-	// time. This helps suppress spurious Chmod events on watched symlinks.
-	if isWatched {
-		stat, err = os.Stat(path)
-		if err != nil {
-			// The symlink still exists, but the target is gone. Report the
-			// Remove similar to above.
- if !w.sendEvent(path, Remove) { - return nil - } - // Don't return the error - } - } - - if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } - } - } else { - if !w.sendEvent(path, Write) { - return nil - } - } - } - if events&unix.FILE_ATTRIB != 0 && stat != nil { - // Only send Chmod if perms changed - if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { - return nil - } - } - } - - if stat != nil { - // If we get here, it means we've hit an event above that requires us to - // continue watching the file or directory - return w.associateFile(path, stat, isWatched) - } - return nil -} - -func (w *Watcher) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. If something was removed from the directory, nothing will happen, - // as everything else should still be watched. - files, err := os.ReadDir(path) - if err != nil { - return err - } - - for _, entry := range files { - path := filepath.Join(path, entry.Name()) - if w.port.PathIsWatched(path) { - continue - } - - finfo, err := entry.Info() - if err != nil { - return err - } - err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } - } - if !w.sendEvent(path, Create) { - return nil - } - } - return nil -} - -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { - if w.isClosed() { - return ErrClosed - } - // This is primarily protecting the call to AssociatePath but it is - // important and intentional that the call to PathIsWatched is also - // protected by this mutex. Without this mutex, AssociatePath has been seen - // to error out that the path is already associated. - w.mu.Lock() - defer w.mu.Unlock() - - if w.port.PathIsWatched(path) { - // Remove the old association in favor of this one If we get ENOENT, - // then while the x/sys/unix wrapper still thought that this path was - // associated, the underlying event port did not. This call will have - // cleared up that discrepancy. The most likely cause is that the event - // has fired but we haven't processed it yet. - err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { - return err - } - } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB - } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) -} - -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { - if !w.port.PathIsWatched(path) { - return nil - } - return w.port.DissociatePath(path) -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)+len(w.dirs)) - for pathname := range w.dirs { - entries = append(entries, pathname) - } - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/e2e/vendor/github.com/fsnotify/fsnotify/backend_inotify.go deleted file mode 100644 index 921c1c1e401..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ /dev/null @@ -1,594 +0,0 @@ -//go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - // Store fd here as os.File.Read() will no longer return on close after - // calling Fd(). See: https://github.com/golang/go/issues/26439 - fd int - inotifyFile *os.File - watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex - doneResp chan struct{} // Channel to respond to Close -} - -type ( - watches struct { - mu sync.RWMutex - wd map[uint32]*watch // wd → watch - path map[string]uint32 // pathname → wd - } - watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. 
- } -) - -func newWatches() *watches { - return &watches{ - wd: make(map[uint32]*watch), - path: make(map[string]uint32), - } -} - -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) - delete(w.wd, wd) -} - -func (w *watches) removePath(path string) (uint32, bool) { - w.mu.Lock() - defer w.mu.Unlock() - - wd, ok := w.path[path] - if !ok { - return 0, false - } - - delete(w.path, path) - delete(w.wd, wd) - - return wd, true -} - -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - -func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - - var existing *watch - wd, ok := w.path[path] - if ok { - existing = w.wd[wd] - } - - upd, err := f(existing) - if err != nil { - return err - } - if upd != nil { - w.wd[upd.wd] = upd - w.path[upd.path] = upd.wd - - if upd.wd != wd { - delete(w.wd, wd) - } - } - - return nil -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - // Need to set nonblocking mode for SetDeadline to work, otherwise blocking - // I/O operations won't terminate on close. - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) - if fd == -1 { - return nil, errno - } - - w := &Watcher{ - fd: fd, - inotifyFile: os.NewFile(uintptr(fd), ""), - watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() - if w.isClosed() { - w.closeMu.Unlock() - return nil - } - close(w.done) - w.closeMu.Unlock() - - // Causes any blocking reads to return with an error, provided the file - // still supports deadline operations. - err := w.inotifyFile.Close() - if err != nil { - return err - } - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts monitoring the path for changes. 
-// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - - name = filepath.Clean(name) - _ = getOptions(opts...) - - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { - if existing != nil { - flags |= existing.flags | unix.IN_MASK_ADD - } - - wd, err := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return nil, err - } - - if existing == nil { - return &watch{ - wd: uint32(wd), - path: name, - flags: flags, - }, nil - } - - existing.wd = uint32(wd) - existing.flags = flags - return existing, nil - }) -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - return w.remove(filepath.Clean(name)) -} - -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. 
- // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno - } - return nil -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() - for pathname := range w.watches.path { - entries = append(entries, pathname) - } - w.watches.mu.RUnlock() - - return entries -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - defer func() { - close(w.doneResp) - close(w.Errors) - close(w.Events) - }() - - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) - for { - // See if we have been closed. - if w.isClosed() { - return - } - - n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: - if !w.sendError(err) { - return - } - continue - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. - } - if !w.sendError(err) { - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { - if !w.sendError(ErrEventOverflow) { - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - watch := w.watches.byWd(uint32(raw.Wd)) - - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) - } - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } - } - - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := w.newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/e2e/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go deleted file mode 100644 index 063a0915a07..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ /dev/null @@ -1,782 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. 
-// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. 
- fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - kq, closepipe, err := newKqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// newKqueue creates a new kernel event queue and returns a descriptor. -// -// This registers a new event on closepipe, which will trigger an event when -// it's closed. This way we can use kevent() without timeout/polling; without -// the closepipe, it would block forever and we wouldn't be able to stop it at -// all. -func newKqueue() (kq int, closepipe [2]int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, closepipe, err - } - - // Register the close pipe. - err = unix.Pipe(closepipe[:]) - if err != nil { - unix.Close(kq) - return kq, closepipe, err - } - - // Register changes to listen on the closepipe. - changes := make([]unix.Kevent_t, 1) - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, - unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) - - ok, err := unix.Kevent(kq, changes, nil, nil) - if ok == -1 { - unix.Close(kq) - unix.Close(closepipe[0]) - unix.Close(closepipe[1]) - return kq, closepipe, err - } - return kq, closepipe, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks - for _, name := range pathsToRemove { - w.Remove(name) - } - - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) - close(w.done) - - return nil -} - -// Add starts monitoring the path for changes. 
-// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) - - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - return w.remove(name, true) -} - -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) - if err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } - - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. 
- if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error to - // the user, as that will just confuse them with an error about a - // path they did not explicitly watch themselves. - w.Remove(name) - } - } - return nil -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { - return nil - } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// addWatch adds name to the watched file set; the flags are interpreted as -// described in kevent(2). -// -// Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", ErrClosed - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets or named pipes - if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { - return "", nil - } - - // Follow Symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - link, err := os.Readlink(name) - if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - - if alreadyWatching { - // Add to watches so we don't get spurious Create events later - // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} - return link, nil - } - - name = link - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - // Retry on EINTR; open() can return EINTR in practice on macOS. - // See #354, and Go issues 11180 and 39237. 
- for { - watchfd, err = unix.Open(name, openMode, 0) - if err == nil { - break - } - if errors.Is(err, unix.EINTR) { - continue - } - - return "", err - } - - isDir = fi.IsDir() - } - - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) - if err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - defer func() { - close(w.Events) - close(w.Errors) - _ = unix.Close(w.kq) - unix.Close(w.closepipe[0]) - }() - - eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { - kevents, err := w.read(eventBuffer) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true - } - continue - } - - // Flush the events we received to the Events channel - for _, kevent := range kevents { - var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) - ) - - // Shut down the loop when the pipe is closed, but only after all - // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue - } - - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - - event := w.newEvent(path.name, mask) - - if event.Has(Rename) || event.Has(Remove) { - w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } - } - - if event.Has(Remove) { - // Look for a file that may have overwritten this; for example, - // mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } - } - } - } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } - } - } - } - } - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. 
-func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - // No point sending a write and delete event at the same time: if it's gone, - // then it's gone. - if e.Op.Has(Write) && e.Op.Has(Remove) { - e.Op &^= Write - } - return e -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := os.ReadDir(dirPath) - if err != nil { - return err - } - - for _, f := range files { - path := filepath.Join(dirPath, f.Name()) - - fi, err := f.Info() - if err != nil { - return fmt.Errorf("%q: %w", path, err) - } - - cleanPath, err := w.internalWatch(path, fi) - if err != nil { - // No permission to read the file; that's not a problem: just skip. - // But do add it to w.fileExists to prevent it from being picked up - // as a "new" file later (it still shows up in the directory - // listing). - switch { - case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): - cleanPath = filepath.Clean(path) - default: - return fmt.Errorf("%q: %w", path, err) - } - } - - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() - } - - return nil -} - -// Search the directory for new files and send an event for them. -// -// This functionality is to have the BSD watcher match the inotify, which sends -// a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { - files, err := os.ReadDir(dir) - if err != nil { - // Directory no longer exists: we can ignore this safely. kqueue will - // still give us the correct events. - if errors.Is(err, os.ErrNotExist) { - return nil - } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - - for _, f := range files { - fi, err := f.Info() - if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) - if err != nil { - // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { - return nil - } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - } - return nil -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { - if fi.IsDir() { - // mimic Linux providing delete events for subdirectories, but preserve - // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - for i, fd := range fds { - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // Register the events. - success, err := unix.Kevent(w.kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(w.kq, nil, events, nil) - if err != nil { - return nil, err - } - return events[0:n], nil -} diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/backend_other.go b/e2e/vendor/github.com/fsnotify/fsnotify/backend_other.go deleted file mode 100644 index d34a23c015f..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ /dev/null @@ -1,205 +0,0 @@ -//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import "errors" - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
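The Events documentation above describes a bitmask of Ops delivered on a channel. A minimal consumer sketch against the public fsnotify API; the watched path is illustrative:

```Go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp/dir"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // Events is closed when the watcher is closed
				return
			}
			// Op is a bitmask; test with Has() rather than ==.
			if ev.Has(fsnotify.Write) {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```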
- Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("fsnotify not supported on the current platform") -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. 
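As the Remove documentation above notes, watches are non-recursive, so a directory and its subdirectory are independent watches. A short sketch; paths are illustrative:

```Go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Each level needs its own watch; Add("/tmp/dir") does not cover subdir.
	for _, p := range []string{"/tmp/dir", "/tmp/dir/subdir"} {
		if err := w.Add(p); err != nil {
			log.Fatal(err)
		}
	}

	// Removing the parent does not remove the child watch.
	for _, p := range []string{"/tmp/dir", "/tmp/dir/subdir"} {
		if err := w.Remove(p); err != nil {
			log.Fatal(err)
		}
	}
}
```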
-// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/e2e/vendor/github.com/fsnotify/fsnotify/backend_windows.go deleted file mode 100644 index 9bc91e5d613..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ /dev/null @@ -1,827 +0,0 @@ -//go:build windows -// +build windows - -// Windows backend based on ReadDirectoryChangesW() -// -// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - port windows.Handle // Handle to completion port - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error - - mu sync.Mutex // Protects access to watches, closed - watches watchMap // Map of watches (key: i-number) - closed bool // Set to true when Close() is first called -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { - port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) - if err != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", err) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - w.mu.Lock() - defer w.mu.Unlock() - return w.closed -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - - event := w.newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.quit: - } - return false -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - w.mu.Lock() - w.closed = true - w.mu.Unlock() - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - - with := getOptions(opts...) 
- if with.bufsize < 4096 { - return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") - } - - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - bufsize: with.bufsize, - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -// These options are from the old golang.org/x/exp/winfsnotify, where you could -// add various options to the watch. This has long since been removed. -// -// The "sys" in the name is misleading as they're not part of any "system". -// -// This should all be removed at some point, and just use windows.FILE_NOTIFY_* -const ( - sysFSALLEVENTS = 0xfff - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - sysFSIGNORED = 0x8000 -) - -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - bufsize int - reply chan error -} - -type inode struct { - handle windows.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov windows.Overlapped - ino *inode // i-number - recurse bool // Recursive watch? 
- path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf []byte // buffer, allocated later -} - -type ( - indexMap map[uint64]*watch - watchMap map[uint32]indexMap -) - -func (w *Watcher) wakeupReader() error { - err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if err != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", err) - } - return nil -} - -func (w *Watcher) getDir(pathname string) (dir string, err error) { - attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) - if err != nil { - return "", os.NewSyscallError("GetFileAttributes", err) - } - if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func (w *Watcher) getIno(path string) (ino *inode, err error) { - h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), - windows.FILE_LIST_DIRECTORY, - windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, - nil, windows.OPEN_EXISTING, - windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) - if err != nil { - return nil, os.NewSyscallError("CreateFile", err) - } - - var fi windows.ByHandleFileInformation - err = windows.GetFileInformationByHandle(h, &fi) - if err != nil { - windows.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", err) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false - - dir, err := w.getDir(pathname) - if err != nil { - return err - } - - ino, err := w.getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) - if err != nil { - windows.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", err) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - recurse: recurse, - buf: make([]byte, bufsize), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - windows.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - - err = w.startRead(watchEntry) - if err != nil { - return err - } - - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. 
-func (w *Watcher) remWatch(pathname string) error { - pathname, recurse := recursivePath(pathname) - - dir, err := w.getDir(pathname) - if err != nil { - return err - } - ino, err := w.getIno(dir) - if err != nil { - return err - } - - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - - if recurse && !watch.recurse { - return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname) - } - - err = windows.CloseHandle(ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - if watch == nil { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { - err := windows.CancelIo(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CancelIo", err)) - w.deleteWatch(watch) - } - mask := w.toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= w.toWindowsFlags(m) - } - if mask == 0 { - err := windows.CloseHandle(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - - // We need to pass the array, rather than the slice. - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) - rdErr := windows.ReadDirectoryChanges(watch.ino.handle, - (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), - watch.recurse, mask, nil, &watch.ov, 0) - if rdErr != nil { - err := os.NewSyscallError("ReadDirectoryChanges", rdErr) - if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n uint32 - key uintptr - ov *windows.Overlapped - ) - runtime.LockOSThread() - - for { - // This error is handled after the watch == nil check below. 
- qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) - - watch := (*watch)(unsafe.Pointer(ov)) - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - - err := windows.CloseHandle(w.port) - if err != nil { - err = os.NewSyscallError("CloseHandle", err) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch qErr { - case nil: - // No error - case windows.ERROR_MORE_DATA: - if watch == nil { - w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. - n = uint32(unsafe.Sizeof(watch.buf)) - } - case windows.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case windows.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) - continue - } - - var offset uint32 - for { - if n == 0 { - w.sendError(ErrEventOverflow) - break - } - - // Point "raw" to the event in the buffer - raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - - // Create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := windows.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case windows.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case windows.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case windows.FILE_ACTION_RENAMED_NEW_NAME: - // Update saved path of all sub-watches. 
- old := filepath.Join(watch.path, watch.rename) - w.mu.Lock() - for _, watchMap := range w.watches { - for _, ww := range watchMap { - if strings.HasPrefix(ww.path, old) { - ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) - } - } - } - w.mu.Unlock() - - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } - if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) - if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! - if offset >= n { - //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) - break - } - } - - if err := w.startRead(watch); err != nil { - w.sendError(err) - } - } -} - -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSMODIFY != 0 { - m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { - switch action { - case windows.FILE_ACTION_ADDED: - return sysFSCREATE - case windows.FILE_ACTION_REMOVED: - return sysFSDELETE - case windows.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case windows.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/e2e/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 24c99cc4999..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package fsnotify provides a cross-platform interface for file system -// notifications. -// -// Currently supported systems: -// -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN -package fsnotify - -import ( - "errors" - "fmt" - "path/filepath" - "strings" -) - -// Event represents a file system notification. -type Event struct { - // Path to the file or directory. - // - // Paths are relative to the input; for example with Add("dir") the Name - // will be set to "dir/file" if you create that file, but if you use - // Add("/path/to/dir") it will be "/path/to/dir/file". - Name string - - // File operation that triggered the event. - // - // This is a bitmask and some systems may send multiple operations at once. - // Use the Event.Has() method instead of comparing with ==. - Op Op -} - -// Op describes a set of file operations. -type Op uint32 - -// The operations fsnotify can trigger; see the documentation on [Watcher] for a -// full description, and check them with [Event.Has]. -const ( - // A new pathname was created. 
- Create Op = 1 << iota - - // The pathname was written to; this does *not* mean the write has finished, - // and a write can be followed by more writes. - Write - - // The path was removed; any watches on it will be removed. Some "remove" - // operations may trigger a Rename if the file is actually moved (for - // example "remove to trash" is often a rename). - Remove - - // The path was renamed to something else; any watched on it will be - // removed. - Rename - - // File attributes were changed. - // - // It's generally not recommended to take action on this event, as it may - // get triggered very frequently by some software. For example, Spotlight - // indexing on macOS, anti-virus software, backup software, etc. - Chmod -) - -// Common errors that can be reported. -var ( - ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") -) - -func (o Op) String() string { - var b strings.Builder - if o.Has(Create) { - b.WriteString("|CREATE") - } - if o.Has(Remove) { - b.WriteString("|REMOVE") - } - if o.Has(Write) { - b.WriteString("|WRITE") - } - if o.Has(Rename) { - b.WriteString("|RENAME") - } - if o.Has(Chmod) { - b.WriteString("|CHMOD") - } - if b.Len() == 0 { - return "[no events]" - } - return b.String()[1:] -} - -// Has reports if this operation has the given operation. -func (o Op) Has(h Op) bool { return o&h != 0 } - -// Has reports if this event has the given operation. -func (e Event) Has(op Op) bool { return e.Op.Has(op) } - -// String returns a string representation of the event with their path. -func (e Event) String() string { - return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) -} - -type ( - addOpt func(opt *withOpts) - withOpts struct { - bufsize int - } -) - -var defaultOpts = withOpts{ - bufsize: 65536, // 64K -} - -func getOptions(opts ...addOpt) withOpts { - with := defaultOpts - for _, o := range opts { - o(&with) - } - return with -} - -// WithBufferSize sets the [ReadDirectoryChangesW] buffer size. -// -// This only has effect on Windows systems, and is a no-op for other backends. -// -// The default value is 64K (65536 bytes) which is the highest value that works -// on all filesystems and should be enough for most applications, but if you -// have a large burst of events it may not be enough. You can increase it if -// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). -// -// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -func WithBufferSize(bytes int) addOpt { - return func(opt *withOpts) { opt.bufsize = bytes } -} - -// Check if this path is recursive (ends with "/..." or "\..."), and return the -// path with the /... stripped. -func recursivePath(path string) (string, bool) { - if filepath.Base(path) == "..." { - return filepath.Dir(path), true - } - return path, false -} diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/e2e/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6539..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/e2e/vendor/github.com/fsnotify/fsnotify/system_bsd.go deleted file mode 100644 index 4322b0b8855..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/e2e/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/e2e/vendor/github.com/fsnotify/fsnotify/system_darwin.go deleted file mode 100644 index 5da5ffa78fe..00000000000 --- a/e2e/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build darwin -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/e2e/vendor/github.com/fxamacker/cbor/v2/README.md b/e2e/vendor/github.com/fxamacker/cbor/v2/README.md index af0a79507e5..d072b81c730 100644 --- a/e2e/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/e2e/vendor/github.com/fxamacker/cbor/v2/README.md @@ -1,30 +1,31 @@ -# CBOR Codec in Go - - +

[CBOR Codec Go logo]

[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. -`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). +`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. ## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. 
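Since the README says the API mostly mirrors `encoding/json`, a minimal round-trip sketch; the type and values are illustrative:

```Go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Animal struct {
	Name string
	Legs int
}

func main() {
	// Marshal, like json.Marshal.
	b, err := cbor.Marshal(Animal{Name: "cat", Legs: 4})
	if err != nil {
		panic(err)
	}

	// Unmarshal, like json.Unmarshal.
	var a Animal
	if err := cbor.Unmarshal(b, &a); err != nil {
		panic(err)
	}
	fmt.Printf("%+v (%d bytes of CBOR)\n", a, len(b))
}
```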
+ Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. -
-<details><summary>Highlights</summary>
+<details><summary>🔎  Highlights</summary>
__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -
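The configurable limits mentioned above live on `DecOptions`; below is a hedged editor's sketch (not part of the README diff) of tightening them for untrusted input, with arbitrary example values:

```Go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Build a decode mode with tighter limits for untrusted data.
	dm, err := cbor.DecOptions{
		MaxNestedLevels:  16,    // cap nesting depth
		MaxArrayElements: 10000, // cap declared array sizes
		MaxMapPairs:      10000, // cap declared map sizes
	}.DecMode()
	if err != nil {
		panic(err)
	}

	var v any
	err = dm.Unmarshal([]byte{0x81, 0x01}, &v) // decodes the CBOR array [1]
	fmt.Println(v, err)
}
```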

-<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary>
- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -
-</details>
- -
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
-<details><summary>Benchmark details</summary>
- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -
-</details>
- -
- -### Smaller Encodings with Struct Tags - -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. - -
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -
-</details>
- -
- -Example using different struct tags together: +Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data. + +> [!NOTE] +> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`: +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op | +> +> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference. +> +>
+> <details><summary>🔎  Benchmark details</summary>
+> +> Latest comparison for decoding CBOR data to Go `[]byte`: +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores) +> - go test -bench=. -benchmem -count=20 +> +> #### Prior comparisons +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | +> +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.19.6, linux/amd64, i5-13600K (DDR4) +> - go test -bench=. -benchmem -count=20 +> +>
> </details>

In contrast, some codecs can crash or use excessive resources while decoding bad data.

> [!WARNING]
> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
>
> <details><summary>🔎  gob fatal error (out of memory) 💥 decoding 181 bytes</summary>
+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>
> </details>
+ +### Smaller Encodings with Struct Tag Options + +Struct tags automatically reduce encoded size of structs and improve speed. + +We can write less code by using struct tag options: +- `toarray`: encode without field names (decode back to original struct) +- `keyasint`: encode field names as integers (decode back to original struct) +- `omitempty`: omit empty field when encoding +- `omitzero`: omit zero-value field when encoding + +As a special case, struct field tag "-" omits the field. + +NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field. ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") -API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. +> [!NOTE] +> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte! +> - `encoding/json`: 18 bytes of JSON +> - `fxamacker/cbor`: 1 byte of CBOR +> +>
> <details><summary>🔎  Encoding 3-level nested Go struct with omitempty</summary>
+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>
> </details>
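To make the tag-option interplay above concrete, here is a small editor's sketch (not part of the upstream README) using `keyasint` and `toarray`; note that `omitempty` on the `toarray` struct would be ignored, keeping element positions stable:

```Go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// header encodes its field names as the integer map keys 1 and 4.
type header struct {
	Alg int    `cbor:"1,keyasint,omitempty"`
	Kid []byte `cbor:"4,keyasint,omitempty"`
}

// record encodes as a fixed-position CBOR array instead of a map.
type record struct {
	_    struct{} `cbor:",toarray"`
	ID   uint64
	Name string
}

func main() {
	h, _ := cbor.Marshal(header{Alg: -7}) // Kid is empty, so it is omitted
	fmt.Println("keyasint:", hex.EncodeToString(h))

	r, _ := cbor.Marshal(record{ID: 1, Name: "a"}) // always a 2-element array
	fmt.Println("toarray: ", hex.EncodeToString(r))
}
```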
+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
🔎  More about tinygo feature branch +> +> ### Tinygo +> +> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go). +> +> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo. +> +> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet. +> +> Changes in this feature branch only affect tinygo compiled software. Summary of changes: +> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33. +> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature. +> - encoding error message can be different when encoding function type. +> +> Related tinygo issues: +> - https://github.com/tinygo-org/tinygo/issues/4277 +> - https://github.com/tinygo-org/tinygo/issues/4458 +> +>
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. + +As a special case, struct field tag "-" omits the field. + +
+<details><summary>🔎  Example encoding with struct field tag "-"</summary>
+ +https://go.dev/play/p/aWEIFxd7InX + +```Go +// https://github.com/fxamacker/cbor/issues/652 +package main + +import ( + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// The `cbor:"-"` tag omits the Type field when encoding to CBOR. +type Entity struct { + _ struct{} `cbor:",toarray"` + ID uint64 `json:"id"` + Type string `cbor:"-" json:"typeOf"` + Name string `json:"name"` +} + +func main() { + entity := Entity{ + ID: 1, + Type: "int64", + Name: "Identifier", + } + + c, _ := cbor.Marshal(entity) + diag, _ := cbor.Diagnose(c) + fmt.Printf("CBOR in hex: %x\n", c) + fmt.Printf("CBOR in edn: %s\n", diag) + + j, _ := json.Marshal(entity) + fmt.Printf("JSON: %s\n", string(j)) + + fmt.Printf("JSON encoding is %d bytes\n", len(j)) + fmt.Printf("CBOR encoding is %d bytes\n", len(c)) + + // Output: + // CBOR in hex: 82016a4964656e746966696572 + // CBOR in edn: [1, "Identifier"] + // JSON: {"id":1,"typeOf":"int64","name":"Identifier"} + // JSON encoding is 45 bytes + // CBOR encoding is 13 bytes +} +``` + +
+</details>
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
+<details><summary>🔎  Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}
</details>
-<details><summary>Example using several struct tags</summary>
+<details><summary>🔎  Example using struct tag options</summary>

![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

</details>
-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
-<details><summary>Example using TagSet and TagOptions</summary>
+<details><summary>🔎  Example using TagSet and TagOptions</summary>
```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ```
</details>
+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces. + +Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc. + +The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2). + +
🔎  Example using Embedded JSON Tag for CBOR (tag 262) + +```go +// https://github.com/fxamacker/cbor/issues/657 + +package cbor_test + +// NOTE: RFC 8949 does not mention tag number 262. IANA assigned +// CBOR tag number 262 as "Embedded JSON Object" specified by the +// document Embedded JSON Tag for CBOR: +// +// "Tag 262 can be applied to a byte string (major type 2) to indicate +// that the byte string is a JSON Object. The length of the byte string +// indicates the content." +// +// For more info, see Embedded JSON Tag for CBOR at: +// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// cborTagNumForEmbeddedJSON is the CBOR tag number 262. +const cborTagNumForEmbeddedJSON = 262 + +// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item +// with tag number 262 and the tag content is a JSON object "embedded" as a +// CBOR byte string (major type 2). +type EmbeddedJSON struct { + any +} + +func NewEmbeddedJSON(val any) EmbeddedJSON { + return EmbeddedJSON{val} +} + +// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the +// tag number 262 and the tag content is a JSON object that is +// "embedded" as a CBOR byte string. +func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) { + // Encode v to JSON object. + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // Create cbor.Tag representing a tagged CBOR data item. + tag := cbor.Tag{ + Number: cborTagNumForEmbeddedJSON, + Content: data, + } + + // Marshal to a tagged CBOR data item. + return cbor.Marshal(tag) +} + +// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON. +// The byte slice provided to this function must contain a single +// tagged CBOR data item with the tag number 262 and tag content +// must be a JSON object "embedded" as a CBOR byte string. +func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error { + // Unmarshal tagged CBOR data item. + var tag cbor.Tag + if err := cbor.Unmarshal(b, &tag); err != nil { + return err + } + + // Check tag number. + if tag.Number != cborTagNumForEmbeddedJSON { + return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON) + } + + // Check tag content. + jsonData, isByteString := tag.Content.([]byte) + if !isByteString { + return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content) + } + + // Unmarshal JSON object. + return json.Unmarshal(jsonData, v) +} + +// MarshalJSON encodes EmbeddedJSON to a JSON object. +func (v EmbeddedJSON) MarshalJSON() ([]byte, error) { + return json.Marshal(v.any) +} + +// UnmarshalJSON decodes a JSON object. +func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error { + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + return dec.Decode(&v.any) +} + +func Example_embeddedJSONTagForCBOR() { + value := NewEmbeddedJSON(map[string]any{ + "name": "gopher", + "id": json.Number("42"), + }) + + data, err := cbor.Marshal(value) + if err != nil { + panic(err) + } + + fmt.Printf("cbor: %x\n", data) + + var v EmbeddedJSON + err = cbor.Unmarshal(data, &v) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", v.any) + for k, v := range v.any.(map[string]any) { + fmt.Printf(" %s: %v (%T)\n", k, v, v) + } +} +``` + +
</details>

### Functions and Interfaces

-<details><summary>Functions and interfaces at a glance</summary>
+<details><summary>🔎  Functions and interfaces at a glance</summary>
Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed. Other useful functions: - `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. - `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes. -- `Wellformed` returns true if the the CBOR data item is well-formed. +- `Wellformed` returns true if the CBOR data item is well-formed. Interfaces identical or comparable to Go `encoding` packages include: `Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. @@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. +- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. +- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. +- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. + +v2.9.0 passed fuzz tests and is production quality. + +The minimum version of Go required to build: +- v2.8.0 and newer releases require go 1.20+. +- v2.7.1 and older releases require go 1.17+. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. -v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). +[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. 
It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). __IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. @@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in that NUMA node and return -// -// Otherwise, for each pair of NUMA nodes: -// - If the set of requested CPUs (modulo 2) can be evenly split across -// the 2 NUMA nodes; AND -// - Any remaining CPUs (after the modulo operation) can be striped across -// some subset of the NUMA nodes; -// --> Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in both NUMA nodes and return -// -// Otherwise, for each 3-tuple of NUMA nodes: -// - If the set of requested CPUs (modulo 3) can be evenly distributed -// across the 3 NUMA nodes; AND -// - Any remaining CPUs (after the modulo operation) can be striped across -// some subset of the NUMA nodes; -// --> Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in all three NUMA nodes and return -// -// ... -// -// Otherwise, for the set of all NUMA nodes: -// - If the set of requested CPUs (modulo NUM_NUMA_NODES) can be evenly -// distributed across all NUMA nodes; AND -// - Any remaining CPUs (after the modulo operation) can be striped across -// some subset of the NUMA nodes; -// --> Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in all NUMA nodes and return -// -// If none of the above conditions can be met, then resort back to a -// best-effort fit of packing CPUs into NUMA nodes by calling -// takeByTopologyNUMAPacked() over all available CPUs. -// -// NOTE: A "balance score" will be calculated to help find the best subset of -// NUMA nodes to allocate any 'remainder' CPUs from (in cases where the total -// number of CPUs to allocate cannot be evenly distributed across the chosen -// set of NUMA nodes). This "balance score" is calculated as the standard -// deviation of how many CPUs will be available on each NUMA node after all -// evenly distributed and remainder CPUs are allocated. The subset with the -// lowest "balance score" will receive the CPUs in order to keep the overall -// allocation of CPUs as "balanced" as possible. -// -// NOTE: This algorithm has been generalized to take an additional -// 'cpuGroupSize' parameter to ensure that CPUs are always allocated in groups -// of size 'cpuGroupSize' according to the algorithm described above. This is -// important, for example, to ensure that all CPUs (i.e. all hyperthreads) from -// a single core are allocated together. 
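The "balance score" in the comment above is the population standard deviation of the free-CPU counts per NUMA node after a candidate allocation; the lower the score, the more balanced the spread. A minimal editor's sketch of the idea (an illustration, not part of the deleted vendor file):

```Go
package main

import (
	"fmt"
	"math"
)

// standardDeviation computes the population standard deviation used for
// the "balance score": sqrt(mean((x - mean(x))^2)).
func standardDeviation(xs []int) float64 {
	var sum float64
	for _, x := range xs {
		sum += float64(x)
	}
	mean := sum / float64(len(xs))

	var variance float64
	for _, x := range xs {
		d := float64(x) - mean
		variance += d * d
	}

	return math.Sqrt(variance / float64(len(xs)))
}

func main() {
	// Free CPUs left per NUMA node under two hypothetical allocations.
	fmt.Println(standardDeviation([]int{4, 4, 4, 4})) // 0: perfectly balanced
	fmt.Println(standardDeviation([]int{8, 0, 4, 4})) // ~2.83: less balanced, higher score
}
```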
-func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuGroupSize int, cpuSortingStrategy CPUSortingStrategy) (cpuset.CPUSet, error) { - // If the number of CPUs requested cannot be handed out in chunks of - // 'cpuGroupSize', then we just call out the packing algorithm since we - // can't distribute CPUs in this chunk size. - // PreferAlignByUncoreCache feature not implemented here yet and set to false. - // Support for PreferAlignByUncoreCache to be done at beta release. - if (numCPUs % cpuGroupSize) != 0 { - return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false) - } - - // Otherwise build an accumulator to start allocating CPUs from. - acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy) - if acc.isSatisfied() { - return acc.result, nil - } - if acc.isFailed() { - return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request: requested=%d, available=%d", numCPUs, availableCPUs.Size()) - } - - // Get the list of NUMA nodes represented by the set of CPUs in 'availableCPUs'. - numas := acc.sortAvailableNUMANodes() - - // Calculate the minimum and maximum possible number of NUMA nodes that - // could satisfy this request. This is used to optimize how many iterations - // of the loop we need to go through below. - minNUMAs, maxNUMAs := acc.rangeNUMANodesNeededToSatisfy(cpuGroupSize) - - // Try combinations of 1,2,3,... NUMA nodes until we find a combination - // where we can evenly distribute CPUs across them. To optimize things, we - // don't always start at 1 and end at len(numas). Instead, we use the - // values of 'minNUMAs' and 'maxNUMAs' calculated above. - for k := minNUMAs; k <= maxNUMAs; k++ { - // Iterate through the various n-choose-k NUMA node combinations, - // looking for the combination of NUMA nodes that can best have CPUs - // distributed across them. - var bestBalance float64 = math.MaxFloat64 - var bestRemainder []int = nil - var bestCombo []int = nil - acc.iterateCombinations(numas, k, func(combo []int) LoopControl { - // If we've already found a combo with a balance of 0 in a - // different iteration, then don't bother checking any others. - if bestBalance == 0 { - return Break - } - - // Check that this combination of NUMA nodes has enough CPUs to - // satisfy the allocation overall. - cpus := acc.details.CPUsInNUMANodes(combo...) - if cpus.Size() < numCPUs { - return Continue - } - - // Check that CPUs can be handed out in groups of size - // 'cpuGroupSize' across the NUMA nodes in this combo. - numCPUGroups := 0 - for _, numa := range combo { - numCPUGroups += (acc.details.CPUsInNUMANodes(numa).Size() / cpuGroupSize) - } - if (numCPUGroups * cpuGroupSize) < numCPUs { - return Continue - } - - // Check that each NUMA node in this combination can allocate an - // even distribution of CPUs in groups of size 'cpuGroupSize', - // modulo some remainder. - distribution := (numCPUs / len(combo) / cpuGroupSize) * cpuGroupSize - for _, numa := range combo { - cpus := acc.details.CPUsInNUMANodes(numa) - if cpus.Size() < distribution { - return Continue - } - } - - // Calculate how many CPUs will be available on each NUMA node in - // the system after allocating an even distribution of CPU groups - // of size 'cpuGroupSize' from each NUMA node in 'combo'. This will - // be used in the "balance score" calculation to help decide if - // this combo should ultimately be chosen. 
- availableAfterAllocation := make(mapIntInt, len(numas)) - for _, numa := range numas { - availableAfterAllocation[numa] = acc.details.CPUsInNUMANodes(numa).Size() - } - for _, numa := range combo { - availableAfterAllocation[numa] -= distribution - } - - // Check if there are any remaining CPUs to distribute across the - // NUMA nodes once CPUs have been evenly distributed in groups of - // size 'cpuGroupSize'. - remainder := numCPUs - (distribution * len(combo)) - - // Get a list of NUMA nodes to consider pulling the remainder CPUs - // from. This list excludes NUMA nodes that don't have at least - // 'cpuGroupSize' CPUs available after being allocated - // 'distribution' number of CPUs. - var remainderCombo []int - for _, numa := range combo { - if availableAfterAllocation[numa] >= cpuGroupSize { - remainderCombo = append(remainderCombo, numa) - } - } - - // Declare a set of local variables to help track the "balance - // scores" calculated when using different subsets of - // 'remainderCombo' to allocate remainder CPUs from. - var bestLocalBalance float64 = math.MaxFloat64 - var bestLocalRemainder []int = nil - - // If there aren't any remainder CPUs to allocate, then calculate - // the "balance score" of this combo as the standard deviation of - // the values contained in 'availableAfterAllocation'. - if remainder == 0 { - bestLocalBalance = standardDeviation(availableAfterAllocation.Values()) - bestLocalRemainder = nil - } - - // Otherwise, find the best "balance score" when allocating the - // remainder CPUs across different subsets of NUMA nodes in 'remainderCombo'. - // These remainder CPUs are handed out in groups of size 'cpuGroupSize'. - // We start from k=len(remainderCombo) and walk down to k=1 so that - // we continue to distribute CPUs as much as possible across - // multiple NUMA nodes. - for k := len(remainderCombo); remainder > 0 && k >= 1; k-- { - acc.iterateCombinations(remainderCombo, k, func(subset []int) LoopControl { - // Make a local copy of 'remainder'. - remainder := remainder - - // Make a local copy of 'availableAfterAllocation'. - availableAfterAllocation := availableAfterAllocation.Clone() - - // If this subset is not capable of allocating all - // remainder CPUs, continue to the next one. - if sum(availableAfterAllocation.Values(subset...)) < remainder { - return Continue - } - - // For all NUMA nodes in 'subset', walk through them, - // removing 'cpuGroupSize' number of CPUs from each - // until all remainder CPUs have been accounted for. - for remainder > 0 { - for _, numa := range subset { - if remainder == 0 { - break - } - if availableAfterAllocation[numa] < cpuGroupSize { - continue - } - availableAfterAllocation[numa] -= cpuGroupSize - remainder -= cpuGroupSize - } - } - - // Calculate the "balance score" as the standard deviation - // of the number of CPUs available on all NUMA nodes in the - // system after the remainder CPUs have been allocated - // across 'subset' in groups of size 'cpuGroupSize'. - balance := standardDeviation(availableAfterAllocation.Values()) - if balance < bestLocalBalance { - bestLocalBalance = balance - bestLocalRemainder = subset - } - - return Continue - }) - } - - // If the best "balance score" for this combo is less than the - // lowest "balance score" of all previous combos, then update this - // combo (and remainder set) to be the best one found so far. 
- if bestLocalBalance < bestBalance { - bestBalance = bestLocalBalance - bestRemainder = bestLocalRemainder - bestCombo = combo - } - - return Continue - }) - - // If we made it through all of the iterations above without finding a - // combination of NUMA nodes that can properly balance CPU allocations, - // then move on to the next larger set of NUMA node combinations. - if bestCombo == nil { - continue - } - - // Otherwise, start allocating CPUs from the NUMA node combination - // chosen. First allocate an even distribution of CPUs in groups of - // size 'cpuGroupSize' from 'bestCombo'. - distribution := (numCPUs / len(bestCombo) / cpuGroupSize) * cpuGroupSize - for _, numa := range bestCombo { - cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy, false) - acc.take(cpus) - } - - // Then allocate any remaining CPUs in groups of size 'cpuGroupSize' - // from each NUMA node in the remainder set. - remainder := numCPUs - (distribution * len(bestCombo)) - for remainder > 0 { - for _, numa := range bestRemainder { - if remainder == 0 { - break - } - if acc.details.CPUsInNUMANodes(numa).Size() < cpuGroupSize { - continue - } - cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy, false) - acc.take(cpus) - remainder -= cpuGroupSize - } - } - - // If we haven't allocated all of our CPUs at this point, then something - // went wrong in our accounting and we should error out. - if acc.numCPUsNeeded > 0 { - return cpuset.New(), fmt.Errorf("accounting error, not enough CPUs allocated, remaining: %v", acc.numCPUsNeeded) - } - - // Likewise, if we have allocated too many CPUs at this point, then something - // went wrong in our accounting and we should error out. - if acc.numCPUsNeeded < 0 { - return cpuset.New(), fmt.Errorf("accounting error, too many CPUs allocated, remaining: %v", acc.numCPUsNeeded) - } - - // Otherwise, return the result - return acc.result, nil - } - - // If we never found a combination of NUMA nodes that we could properly - // distribute CPUs across, fall back to the packing algorithm. - return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go deleted file mode 100644 index 8b59ba67121..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ /dev/null @@ -1,527 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cpumanager - -import ( - "context" - "fmt" - "math" - "sync" - "time" - - cadvisorapi "github.com/google/cadvisor/info/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/klog/v2" - - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/config" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/status" - "k8s.io/utils/cpuset" -) - -// ActivePodsFunc is a function that returns a list of pods to reconcile. -type ActivePodsFunc func() []*v1.Pod - -type runtimeService interface { - UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error -} - -type policyName string - -// cpuManagerStateFileName is the file name where cpu manager stores its state -const cpuManagerStateFileName = "cpu_manager_state" - -// Manager interface provides methods for Kubelet to manage pod cpus. -type Manager interface { - // Start is called during Kubelet initialization. - Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error - - // Called to trigger the allocation of CPUs to a container. This must be - // called at some point prior to the AddContainer() call for a container, - // e.g. at pod admission time. - Allocate(pod *v1.Pod, container *v1.Container) error - - // AddContainer adds the mapping between container ID to pod UID and the container name - // The mapping used to remove the CPU allocation during the container removal - AddContainer(p *v1.Pod, c *v1.Container, containerID string) - - // RemoveContainer is called after Kubelet decides to kill or delete a - // container. After this call, the CPU manager stops trying to reconcile - // that container and any CPUs dedicated to the container are freed. - RemoveContainer(containerID string) error - - // State returns a read-only interface to the internal CPU manager state. - State() state.Reader - - // GetTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment among this - // and other resource controllers. - GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint - - // GetExclusiveCPUs implements the podresources.CPUsProvider interface to provide - // exclusively allocated cpus for the container - GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet - - // GetPodTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment per Pod - // among this and other resource controllers. - GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint - - // GetAllocatableCPUs returns the total set of CPUs available for allocation. - GetAllocatableCPUs() cpuset.CPUSet - - // GetCPUAffinity returns cpuset which includes cpus from shared pools - // as well as exclusively allocated cpus - GetCPUAffinity(podUID, containerName string) cpuset.CPUSet - - // GetAllCPUs returns all the CPUs known by cpumanager, as reported by the - // hardware discovery. Maps to the CPU capacity. 
- GetAllCPUs() cpuset.CPUSet -} - -type manager struct { - sync.Mutex - policy Policy - - // reconcilePeriod is the duration between calls to reconcileState. - reconcilePeriod time.Duration - - // state allows pluggable CPU assignment policies while sharing a common - // representation of state for the system to inspect and reconcile. - state state.State - - // lastUpdatedstate holds state for each container from the last time it was updated. - lastUpdateState state.State - - // containerRuntime is the container runtime service interface needed - // to make UpdateContainerResources() calls against the containers. - containerRuntime runtimeService - - // activePods is a method for listing active pods on the node - // so all the containers can be updated in the reconciliation loop. - activePods ActivePodsFunc - - // podStatusProvider provides a method for obtaining pod statuses - // and the containerID of their containers - podStatusProvider status.PodStatusProvider - - // containerMap provides a mapping from (pod, container) -> containerID - // for all containers a pod - containerMap containermap.ContainerMap - - topology *topology.CPUTopology - - nodeAllocatableReservation v1.ResourceList - - // sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. - // We use it to determine when we can purge inactive pods from checkpointed state. - sourcesReady config.SourcesReady - - // stateFileDirectory holds the directory where the state file for checkpoints is held. - stateFileDirectory string - - // allCPUs is the set of online CPUs as reported by the system - allCPUs cpuset.CPUSet - - // allocatableCPUs is the set of online CPUs as reported by the system, - // and available for allocation, minus the reserved set - allocatableCPUs cpuset.CPUSet -} - -var _ Manager = &manager{} - -type sourcesReadyStub struct{} - -func (s *sourcesReadyStub) AddSource(source string) {} -func (s *sourcesReadyStub) AllReady() bool { return true } - -// NewManager creates new cpu manager based on provided policy -func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) { - var topo *topology.CPUTopology - var policy Policy - var err error - - topo, err = topology.Discover(machineInfo) - if err != nil { - return nil, err - } - - switch policyName(cpuPolicyName) { - - case PolicyNone: - policy, err = NewNonePolicy(cpuPolicyOptions) - if err != nil { - return nil, fmt.Errorf("new none policy error: %w", err) - } - - case PolicyStatic: - klog.InfoS("Detected CPU topology", "topology", topo) - - reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU] - if !ok { - // The static policy cannot initialize without this information. - return nil, fmt.Errorf("[cpumanager] unable to determine reserved CPU resources for static policy") - } - if reservedCPUs.IsZero() { - // The static policy requires this to be nonzero. Zero CPU reservation - // would allow the shared pool to be completely exhausted. At that point - // either we would violate our guarantee of exclusivity or need to evict - // any pod that has at least one container that requires zero CPUs. - // See the comments in policy_static.go for more details. 
- return nil, fmt.Errorf("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero") - } - - // Take the ceiling of the reservation, since fractional CPUs cannot be - // exclusively allocated. - reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000 - numReservedCPUs := int(math.Ceil(reservedCPUsFloat)) - policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions) - if err != nil { - return nil, fmt.Errorf("new static policy error: %w", err) - } - - default: - return nil, fmt.Errorf("unknown policy: \"%s\"", cpuPolicyName) - } - - manager := &manager{ - policy: policy, - reconcilePeriod: reconcilePeriod, - lastUpdateState: state.NewMemoryState(), - topology: topo, - nodeAllocatableReservation: nodeAllocatableReservation, - stateFileDirectory: stateFileDirectory, - allCPUs: topo.CPUDetails.CPUs(), - } - manager.sourcesReady = &sourcesReadyStub{} - return manager, nil -} - -func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error { - klog.InfoS("Starting CPU manager", "policy", m.policy.Name()) - klog.InfoS("Reconciling", "reconcilePeriod", m.reconcilePeriod) - m.sourcesReady = sourcesReady - m.activePods = activePods - m.podStatusProvider = podStatusProvider - m.containerRuntime = containerRuntime - m.containerMap = initialContainers - - stateImpl, err := state.NewCheckpointState(m.stateFileDirectory, cpuManagerStateFileName, m.policy.Name(), m.containerMap) - if err != nil { - klog.ErrorS(err, "Could not initialize checkpoint manager, please drain node and remove policy state file") - return err - } - m.state = stateImpl - - err = m.policy.Start(m.state) - if err != nil { - klog.ErrorS(err, "Policy start error") - return err - } - - klog.V(4).InfoS("CPU manager started", "policy", m.policy.Name()) - - m.allocatableCPUs = m.policy.GetAllocatableCPUs(m.state) - - if m.policy.Name() == string(PolicyNone) { - return nil - } - // Periodically call m.reconcileState() to continue to keep the CPU sets of - // all pods in sync with and guaranteed CPUs handed out among them. - go wait.Until(func() { m.reconcileState() }, m.reconcilePeriod, wait.NeverStop) - return nil -} - -func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error { - // Garbage collect any stranded resources before allocating CPUs. - m.removeStaleState() - - m.Lock() - defer m.Unlock() - - // Call down into the policy to assign this container CPUs if required. 
- err := m.policy.Allocate(m.state, p, c) - if err != nil { - klog.ErrorS(err, "Allocate error") - return err - } - - return nil -} - -func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { - m.Lock() - defer m.Unlock() - if cset, exists := m.state.GetCPUSet(string(pod.UID), container.Name); exists { - m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset) - } - m.containerMap.Add(string(pod.UID), container.Name, containerID) -} - -func (m *manager) RemoveContainer(containerID string) error { - m.Lock() - defer m.Unlock() - - err := m.policyRemoveContainerByID(containerID) - if err != nil { - klog.ErrorS(err, "RemoveContainer error") - return err - } - - return nil -} - -func (m *manager) policyRemoveContainerByID(containerID string) error { - podUID, containerName, err := m.containerMap.GetContainerRef(containerID) - if err != nil { - return nil - } - - err = m.policy.RemoveContainer(m.state, podUID, containerName) - if err == nil { - m.lastUpdateState.Delete(podUID, containerName) - m.containerMap.RemoveByContainerID(containerID) - } - - return err -} - -func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) error { - err := m.policy.RemoveContainer(m.state, podUID, containerName) - if err == nil { - m.lastUpdateState.Delete(podUID, containerName) - m.containerMap.RemoveByContainerRef(podUID, containerName) - } - - return err -} - -func (m *manager) State() state.Reader { - return m.state -} - -func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - // Garbage collect any stranded resources before providing TopologyHints - m.removeStaleState() - // Delegate to active policy - return m.policy.GetTopologyHints(m.state, pod, container) -} - -func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint { - // Garbage collect any stranded resources before providing TopologyHints - m.removeStaleState() - // Delegate to active policy - return m.policy.GetPodTopologyHints(m.state, pod) -} - -func (m *manager) GetAllocatableCPUs() cpuset.CPUSet { - return m.allocatableCPUs.Clone() -} - -func (m *manager) GetAllCPUs() cpuset.CPUSet { - return m.allCPUs.Clone() -} - -type reconciledContainer struct { - podName string - containerName string - containerID string -} - -func (m *manager) removeStaleState() { - // Only once all sources are ready do we attempt to remove any stale state. - // This ensures that the call to `m.activePods()` below will succeed with - // the actual active pods list. - if !m.sourcesReady.AllReady() { - return - } - - // We grab the lock to ensure that no new containers will grab CPUs while - // executing the code below. Without this lock, its possible that we end up - // removing state that is newly added by an asynchronous call to - // AddContainer() during the execution of this code. - m.Lock() - defer m.Unlock() - - // Get the list of active pods. - activePods := m.activePods() - - // Build a list of (podUID, containerName) pairs for all containers in all active Pods. - activeContainers := make(map[string]map[string]struct{}) - for _, pod := range activePods { - activeContainers[string(pod.UID)] = make(map[string]struct{}) - for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) { - activeContainers[string(pod.UID)][container.Name] = struct{}{} - } - } - - // Loop through the CPUManager state. Remove any state for containers not - // in the `activeContainers` list built above. 
- assignments := m.state.GetCPUAssignments() - for podUID := range assignments { - for containerName := range assignments[podUID] { - if _, ok := activeContainers[podUID][containerName]; ok { - klog.V(5).InfoS("RemoveStaleState: container still active", "podUID", podUID, "containerName", containerName) - continue - } - klog.V(2).InfoS("RemoveStaleState: removing container", "podUID", podUID, "containerName", containerName) - err := m.policyRemoveContainerByRef(podUID, containerName) - if err != nil { - klog.ErrorS(err, "RemoveStaleState: failed to remove container", "podUID", podUID, "containerName", containerName) - } - } - } - - m.containerMap.Visit(func(podUID, containerName, containerID string) { - if _, ok := activeContainers[podUID][containerName]; ok { - klog.V(5).InfoS("RemoveStaleState: containerMap: container still active", "podUID", podUID, "containerName", containerName) - return - } - klog.V(2).InfoS("RemoveStaleState: containerMap: removing container", "podUID", podUID, "containerName", containerName) - err := m.policyRemoveContainerByRef(podUID, containerName) - if err != nil { - klog.ErrorS(err, "RemoveStaleState: containerMap: failed to remove container", "podUID", podUID, "containerName", containerName) - } - }) -} - -func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) { - ctx := context.Background() - success = []reconciledContainer{} - failure = []reconciledContainer{} - - m.removeStaleState() - for _, pod := range m.activePods() { - pstatus, ok := m.podStatusProvider.GetPodStatus(pod.UID) - if !ok { - klog.V(5).InfoS("ReconcileState: skipping pod; status not found", "pod", klog.KObj(pod)) - failure = append(failure, reconciledContainer{pod.Name, "", ""}) - continue - } - - allContainers := pod.Spec.InitContainers - allContainers = append(allContainers, pod.Spec.Containers...) - for _, container := range allContainers { - containerID, err := findContainerIDByName(&pstatus, container.Name) - if err != nil { - klog.V(5).InfoS("ReconcileState: skipping container; ID not found in pod status", "pod", klog.KObj(pod), "containerName", container.Name, "err", err) - failure = append(failure, reconciledContainer{pod.Name, container.Name, ""}) - continue - } - - cstatus, err := findContainerStatusByName(&pstatus, container.Name) - if err != nil { - klog.V(5).InfoS("ReconcileState: skipping container; container status not found in pod status", "pod", klog.KObj(pod), "containerName", container.Name, "err", err) - failure = append(failure, reconciledContainer{pod.Name, container.Name, ""}) - continue - } - - if cstatus.State.Waiting != nil || - (cstatus.State.Waiting == nil && cstatus.State.Running == nil && cstatus.State.Terminated == nil) { - klog.V(4).InfoS("ReconcileState: skipping container; container still in the waiting state", "pod", klog.KObj(pod), "containerName", container.Name, "err", err) - failure = append(failure, reconciledContainer{pod.Name, container.Name, ""}) - continue - } - - m.Lock() - if cstatus.State.Terminated != nil { - // The container is terminated but we can't call m.RemoveContainer() - // here because it could remove the allocated cpuset for the container - // which may be in the process of being restarted. That would result - // in the container losing any exclusively-allocated CPUs that it - // was allocated. 
- _, _, err := m.containerMap.GetContainerRef(containerID) - if err == nil { - klog.V(4).InfoS("ReconcileState: ignoring terminated container", "pod", klog.KObj(pod), "containerID", containerID) - } - m.Unlock() - continue - } - - // Once we make it here we know we have a running container. - // Idempotently add it to the containerMap incase it is missing. - // This can happen after a kubelet restart, for example. - m.containerMap.Add(string(pod.UID), container.Name, containerID) - m.Unlock() - - cset := m.state.GetCPUSetOrDefault(string(pod.UID), container.Name) - if cset.IsEmpty() { - // NOTE: This should not happen outside of tests. - klog.V(2).InfoS("ReconcileState: skipping container; empty cpuset assigned", "pod", klog.KObj(pod), "containerName", container.Name) - failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) - continue - } - - lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name) - if !cset.Equals(lcset) { - klog.V(5).InfoS("ReconcileState: updating container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset) - err = m.updateContainerCPUSet(ctx, containerID, cset) - if err != nil { - klog.ErrorS(err, "ReconcileState: failed to update container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset) - failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) - continue - } - m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset) - } - success = append(success, reconciledContainer{pod.Name, container.Name, containerID}) - } - } - return success, failure -} - -func findContainerIDByName(status *v1.PodStatus, name string) (string, error) { - allStatuses := status.InitContainerStatuses - allStatuses = append(allStatuses, status.ContainerStatuses...) - for _, container := range allStatuses { - if container.Name == name && container.ContainerID != "" { - cid := &kubecontainer.ContainerID{} - err := cid.ParseString(container.ContainerID) - if err != nil { - return "", err - } - return cid.ID, nil - } - } - return "", fmt.Errorf("unable to find ID for container with name %v in pod status (it may not be running)", name) -} - -func findContainerStatusByName(status *v1.PodStatus, name string) (*v1.ContainerStatus, error) { - for _, containerStatus := range append(status.InitContainerStatuses, status.ContainerStatuses...) { - if containerStatus.Name == name { - return &containerStatus, nil - } - } - return nil, fmt.Errorf("unable to find status for container with name %v in pod status (it may not be running)", name) -} - -func (m *manager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet { - if result, ok := m.state.GetCPUSet(podUID, containerName); ok { - return result - } - - return cpuset.CPUSet{} -} - -func (m *manager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet { - return m.state.GetCPUSetOrDefault(podUID, containerName) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_others.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_others.go deleted file mode 100644 index 556583b1a7c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_others.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build !windows -// +build !windows - -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cpumanager - -import ( - "context" - - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/utils/cpuset" -) - -func (m *manager) updateContainerCPUSet(ctx context.Context, containerID string, cpus cpuset.CPUSet) error { - // TODO: Consider adding a `ResourceConfigForContainer` helper in - // helpers_linux.go similar to what exists for pods. - // It would be better to pass the full container resources here instead of - // this patch-like partial resources. - - return m.containerRuntime.UpdateContainerResources( - ctx, - containerID, - &runtimeapi.ContainerResources{ - Linux: &runtimeapi.LinuxContainerResources{ - CpusetCpus: cpus.String(), - }, - }) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_windows.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_windows.go deleted file mode 100644 index f9a239ce550..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cpumanager - -import ( - "context" - utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/winstats" - "k8s.io/utils/cpuset" -) - -func (m *manager) updateContainerCPUSet(ctx context.Context, containerID string, cpus cpuset.CPUSet) error { - if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WindowsCPUAndMemoryAffinity) { - return nil - } - - affinities := winstats.CpusToGroupAffinity(cpus.List()) - var cpuGroupAffinities []*runtimeapi.WindowsCpuGroupAffinity - for _, affinity := range affinities { - cpuGroupAffinities = append(cpuGroupAffinities, &runtimeapi.WindowsCpuGroupAffinity{ - CpuGroup: uint32(affinity.Group), - CpuMask: uint64(affinity.Mask), - }) - } - return m.containerRuntime.UpdateContainerResources(ctx, containerID, &runtimeapi.ContainerResources{ - Windows: &runtimeapi.WindowsContainerResources{ - AffinityCpus: cpuGroupAffinities, - }, - }) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go deleted file mode 100644 index 8f00ec3784b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
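
The Windows variant above leans on winstats.CpusToGroupAffinity. The underlying encoding is simple enough to show standalone; this is an illustrative re-derivation, not the winstats code: Windows addresses logical CPUs as (processor group, 64-bit mask) pairs, one group per block of up to 64 CPUs.

package main

import "fmt"

// groupAffinity mirrors the idea behind WindowsCpuGroupAffinity.
type groupAffinity struct {
	Group uint32
	Mask  uint64
}

func cpusToGroupAffinity(cpuIDs []int) map[uint32]*groupAffinity {
	out := map[uint32]*groupAffinity{}
	for _, cpu := range cpuIDs {
		group := uint32(cpu / 64)
		ga, ok := out[group]
		if !ok {
			ga = &groupAffinity{Group: group}
			out[group] = ga
		}
		ga.Mask |= 1 << (uint(cpu) % 64) // set the bit for this CPU within its group
	}
	return out
}

func main() {
	for _, ga := range cpusToGroupAffinity([]int{0, 1, 65}) {
		fmt.Printf("group %d mask %#x\n", ga.Group, ga.Mask)
	}
}
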
-*/ - -package cpumanager - -import ( - "k8s.io/api/core/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/config" - "k8s.io/kubernetes/pkg/kubelet/status" - "k8s.io/utils/cpuset" -) - -type fakeManager struct { - state state.State -} - -func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error { - klog.InfoS("Start()") - return nil -} - -func (m *fakeManager) Policy() Policy { - klog.InfoS("Policy()") - pol, _ := NewNonePolicy(nil) - return pol -} - -func (m *fakeManager) Allocate(pod *v1.Pod, container *v1.Container) error { - klog.InfoS("Allocate", "pod", klog.KObj(pod), "containerName", container.Name) - return nil -} - -func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { - klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID) -} - -func (m *fakeManager) RemoveContainer(containerID string) error { - klog.InfoS("RemoveContainer", "containerID", containerID) - return nil -} - -func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - klog.InfoS("Get container topology hints") - return map[string][]topologymanager.TopologyHint{} -} - -func (m *fakeManager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint { - klog.InfoS("Get pod topology hints") - return map[string][]topologymanager.TopologyHint{} -} - -func (m *fakeManager) State() state.Reader { - return m.state -} - -func (m *fakeManager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet { - klog.InfoS("GetExclusiveCPUs", "podUID", podUID, "containerName", containerName) - return cpuset.CPUSet{} -} - -func (m *fakeManager) GetAllocatableCPUs() cpuset.CPUSet { - klog.InfoS("Get Allocatable CPUs") - return cpuset.CPUSet{} -} - -func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet { - klog.InfoS("GetCPUAffinity", "podUID", podUID, "containerName", containerName) - return cpuset.CPUSet{} -} - -func (m *fakeManager) GetAllCPUs() cpuset.CPUSet { - klog.InfoS("GetAllCPUs") - return cpuset.CPUSet{} -} - -// NewFakeManager creates empty/fake cpu manager -func NewFakeManager() Manager { - return &fakeManager{ - state: state.NewMemoryState(), - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go deleted file mode 100644 index a80da9427ce..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cpumanager - -import ( - "k8s.io/api/core/v1" - - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/utils/cpuset" -) - -// Policy implements logic for pod container to CPU assignment. -type Policy interface { - Name() string - Start(s state.State) error - // Allocate call is idempotent - Allocate(s state.State, pod *v1.Pod, container *v1.Container) error - // RemoveContainer call is idempotent - RemoveContainer(s state.State, podUID string, containerName string) error - // GetTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment among this - // and other resource controllers. - GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint - // GetPodTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment per Pod - // among this and other resource controllers. - GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint - // GetAllocatableCPUs returns the total set of CPUs available for allocation. - GetAllocatableCPUs(m state.State) cpuset.CPUSet -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go deleted file mode 100644 index ff86498fd92..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cpumanager - -import ( - "fmt" - - "k8s.io/api/core/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/utils/cpuset" -) - -type nonePolicy struct{} - -var _ Policy = &nonePolicy{} - -// PolicyNone name of none policy -const PolicyNone policyName = "none" - -// NewNonePolicy returns a cpuset manager policy that does nothing -func NewNonePolicy(cpuPolicyOptions map[string]string) (Policy, error) { - if len(cpuPolicyOptions) > 0 { - return nil, fmt.Errorf("None policy: received unsupported options=%v", cpuPolicyOptions) - } - return &nonePolicy{}, nil -} - -func (p *nonePolicy) Name() string { - return string(PolicyNone) -} - -func (p *nonePolicy) Start(s state.State) error { - klog.InfoS("None policy: Start") - return nil -} - -func (p *nonePolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error { - return nil -} - -func (p *nonePolicy) RemoveContainer(s state.State, podUID string, containerName string) error { - return nil -} - -func (p *nonePolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - return nil -} - -func (p *nonePolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint { - return nil -} - -// Assignable CPUs are the ones that can be exclusively allocated to pods that meet the exclusivity requirement -// (ie guaranteed QoS class and integral CPU request). -// Assignability of CPUs as a concept is only applicable in case of static policy i.e. scenarios where workloads -// CAN get exclusive access to core(s). -// Hence, we return empty set here: no cpus are assignable according to above definition with this policy. -func (p *nonePolicy) GetAllocatableCPUs(m state.State) cpuset.CPUSet { - return cpuset.New() -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_options.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_options.go deleted file mode 100644 index 65b415bdad6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_options.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cpumanager - -import ( - "fmt" - "strconv" - - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" -) - -// Names of the options, as part of the user interface. 
-const (
-	FullPCPUsOnlyOption             string = "full-pcpus-only"
-	DistributeCPUsAcrossNUMAOption  string = "distribute-cpus-across-numa"
-	AlignBySocketOption             string = "align-by-socket"
-	DistributeCPUsAcrossCoresOption string = "distribute-cpus-across-cores"
-	StrictCPUReservationOption      string = "strict-cpu-reservation"
-	PreferAlignByUnCoreCacheOption  string = "prefer-align-cpus-by-uncorecache"
-)
-
-var (
-	alphaOptions = sets.New[string](
-		AlignBySocketOption,
-		DistributeCPUsAcrossCoresOption,
-		PreferAlignByUnCoreCacheOption,
-	)
-	betaOptions = sets.New[string](
-		StrictCPUReservationOption,
-		DistributeCPUsAcrossNUMAOption,
-	)
-	stableOptions = sets.New[string](
-		FullPCPUsOnlyOption,
-	)
-)
-
-// CheckPolicyOptionAvailable verifies if the given option can be used depending on the Feature Gate Settings.
-// returns nil on success, or an error describing the failure on error.
-func CheckPolicyOptionAvailable(option string) error {
-	if !alphaOptions.Has(option) && !betaOptions.Has(option) && !stableOptions.Has(option) {
-		return fmt.Errorf("unknown CPU Manager Policy option: %q", option)
-	}
-
-	if alphaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManagerPolicyAlphaOptions) {
-		return fmt.Errorf("CPU Manager Policy Alpha-level Options not enabled, but option %q provided", option)
-	}
-
-	if betaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManagerPolicyBetaOptions) {
-		return fmt.Errorf("CPU Manager Policy Beta-level Options not enabled, but option %q provided", option)
-	}
-
-	// if the option is stable, we need no CPUManagerPolicy*Options feature gate check
-	return nil
-}
-
-// StaticPolicyOptions holds the parsed value of the policy options, ready to be consumed internally.
-type StaticPolicyOptions struct {
-	// flag to enable extra allocation restrictions to avoid
-	// different containers possibly ending up on the same core.
-	// we consider "core" and "physical CPU" synonyms here, leaning
-	// towards the terminology of k8s hints. We acknowledge this is confusing.
-	//
-	// looking at https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/,
-	// any possible naming scheme will lead to ambiguity to some extent.
-	// We picked "pcpu" because the established docs already hint at vCPU.
-	FullPhysicalCPUsOnly bool
-	// Flag to evenly distribute CPUs across NUMA nodes in cases where more
-	// than one NUMA node is required to satisfy the allocation.
-	DistributeCPUsAcrossNUMA bool
-	// Flag to ensure CPUs are considered aligned at the socket boundary rather than
-	// the NUMA boundary.
-	AlignBySocket bool
-	// flag to enable extra allocation restrictions to spread
-	// cpus (HT) across different physical cores.
-	// This is a preferred policy, so do not throw an error if they have to be packed on one physical core.
-	DistributeCPUsAcrossCores bool
-	// Flag to remove reserved cores from the list of available cores.
-	StrictCPUReservation bool
-	// Flag that makes a best effort to align CPUs to an uncorecache boundary.
-	// As long as there are CPUs available, pods will be admitted even if the condition is not met.
-	PreferAlignByUncoreCacheOption bool
-}
-
-// NewStaticPolicyOptions creates a StaticPolicyOptions struct from the user configuration.
-func NewStaticPolicyOptions(policyOptions map[string]string) (StaticPolicyOptions, error) {
-	opts := StaticPolicyOptions{}
-	for name, value := range policyOptions {
-		if err := CheckPolicyOptionAvailable(name); err != nil {
-			return opts, err
-		}
-
-		switch name {
-		case FullPCPUsOnlyOption:
-			optValue, err := strconv.ParseBool(value)
-			if err != nil {
-				return opts, fmt.Errorf("bad value for option %q: %w", name, err)
-			}
-			opts.FullPhysicalCPUsOnly = optValue
-		case DistributeCPUsAcrossNUMAOption:
-			optValue, err := strconv.ParseBool(value)
-			if err != nil {
-				return opts, fmt.Errorf("bad value for option %q: %w", name, err)
-			}
-			opts.DistributeCPUsAcrossNUMA = optValue
-		case AlignBySocketOption:
-			optValue, err := strconv.ParseBool(value)
-			if err != nil {
-				return opts, fmt.Errorf("bad value for option %q: %w", name, err)
-			}
-			opts.AlignBySocket = optValue
-		case DistributeCPUsAcrossCoresOption:
-			optValue, err := strconv.ParseBool(value)
-			if err != nil {
-				return opts, fmt.Errorf("bad value for option %q: %w", name, err)
-			}
-			opts.DistributeCPUsAcrossCores = optValue
-		case StrictCPUReservationOption:
-			optValue, err := strconv.ParseBool(value)
-			if err != nil {
-				return opts, fmt.Errorf("bad value for option %q: %w", name, err)
-			}
-			opts.StrictCPUReservation = optValue
-		case PreferAlignByUnCoreCacheOption:
-			optValue, err := strconv.ParseBool(value)
-			if err != nil {
-				return opts, fmt.Errorf("bad value for option %q: %w", name, err)
-			}
-			opts.PreferAlignByUncoreCacheOption = optValue
-		default:
-			// this should never be reached, we already detect unknown options,
-			// but we keep it as further safety.
-			return opts, fmt.Errorf("unsupported cpumanager option: %q (%s)", name, value)
-		}
-	}
-
-	if opts.FullPhysicalCPUsOnly && opts.DistributeCPUsAcrossCores {
-		return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", FullPCPUsOnlyOption, DistributeCPUsAcrossCoresOption)
-	}
-
-	// TODO(@Jeffwan): Remove this check after more compatibility tests are done.
-	if opts.DistributeCPUsAcrossNUMA && opts.DistributeCPUsAcrossCores {
-		return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", DistributeCPUsAcrossNUMAOption, DistributeCPUsAcrossCoresOption)
-	}
-
-	if opts.PreferAlignByUncoreCacheOption && opts.DistributeCPUsAcrossCores {
-		return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", PreferAlignByUnCoreCacheOption, DistributeCPUsAcrossCoresOption)
-	}
-
-	if opts.PreferAlignByUncoreCacheOption && opts.DistributeCPUsAcrossNUMA {
-		return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", PreferAlignByUnCoreCacheOption, DistributeCPUsAcrossNUMAOption)
-	}
-
-	return opts, nil
-}
-
-// ValidateStaticPolicyOptions ensures that the requested policy options are compatible with the machine on which the CPUManager is running.
-func ValidateStaticPolicyOptions(opts StaticPolicyOptions, topology *topology.CPUTopology, topologyManager topologymanager.Store) error {
-	if opts.AlignBySocket {
-		// Not compatible with the topology manager single-numa-node policy option.
-		if topologyManager.GetPolicy().Name() == topologymanager.PolicySingleNumaNode {
-			return fmt.Errorf("Topology manager %s policy is incompatible with CPUManager %s policy option", topologymanager.PolicySingleNumaNode, AlignBySocketOption)
-		}
-		// Not compatible with topologies where the number of sockets is greater than the number of NUMA nodes.
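
Every option above funnels through the same strconv.ParseBool guard. A tiny standalone sketch of that per-option pattern (the option name is just an example string):

package main

import (
	"fmt"
	"strconv"
)

// parseBoolOption mirrors the per-case handling in NewStaticPolicyOptions:
// values arrive as strings and are rejected with a descriptive error when
// they are not valid bool literals.
func parseBoolOption(name, value string) (bool, error) {
	v, err := strconv.ParseBool(value)
	if err != nil {
		return false, fmt.Errorf("bad value for option %q: %w", name, err)
	}
	return v, nil
}

func main() {
	for _, val := range []string{"true", "1", "yes"} {
		v, err := parseBoolOption("full-pcpus-only", val)
		fmt.Printf("%q -> %v err=%v\n", val, v, err) // "yes" fails: not a ParseBool literal
	}
}
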
- if topology.NumSockets > topology.NumNUMANodes { - return fmt.Errorf("Align by socket is not compatible with hardware where number of sockets are more than number of NUMA") - } - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go deleted file mode 100644 index 28591c5baf1..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go +++ /dev/null @@ -1,816 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cpumanager - -import ( - "fmt" - "strconv" - - v1 "k8s.io/api/core/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" - "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/utils/cpuset" -) - -const ( - - // PolicyStatic is the name of the static policy. - // Should options be given, these will be ignored and backward (up to 1.21 included) - // compatible behaviour will be enforced - PolicyStatic policyName = "static" - // ErrorSMTAlignment represents the type of an SMTAlignmentError - ErrorSMTAlignment = "SMTAlignmentError" -) - -// SMTAlignmentError represents an error due to SMT alignment -type SMTAlignmentError struct { - RequestedCPUs int - CpusPerCore int - AvailablePhysicalCPUs int - CausedByPhysicalCPUs bool -} - -func (e SMTAlignmentError) Error() string { - if e.CausedByPhysicalCPUs { - return fmt.Sprintf("SMT Alignment Error: not enough free physical CPUs: available physical CPUs = %d, requested CPUs = %d, CPUs per core = %d", e.AvailablePhysicalCPUs, e.RequestedCPUs, e.CpusPerCore) - } - return fmt.Sprintf("SMT Alignment Error: requested %d cpus not multiple cpus per core = %d", e.RequestedCPUs, e.CpusPerCore) -} - -// Type returns human-readable type of this error. Used in the admission control to populate Admission Failure reason. -func (e SMTAlignmentError) Type() string { - return ErrorSMTAlignment -} - -// staticPolicy is a CPU manager policy that does not change CPU -// assignments for exclusively pinned guaranteed containers after the main -// container process starts. -// -// This policy allocates CPUs exclusively for a container if all the following -// conditions are met: -// -// - The pod QoS class is Guaranteed. -// - The CPU request is a positive integer. -// -// The static policy maintains the following sets of logical CPUs: -// -// - SHARED: Burstable, BestEffort, and non-integral Guaranteed containers -// run here. Initially this contains all CPU IDs on the system. As -// exclusive allocations are created and destroyed, this CPU set shrinks -// and grows, accordingly. 
This is stored in the state as the default -// CPU set. -// -// - RESERVED: A subset of the shared pool which is not exclusively -// allocatable. The membership of this pool is static for the lifetime of -// the Kubelet. The size of the reserved pool is -// ceil(systemreserved.cpu + kubereserved.cpu). -// Reserved CPUs are taken topologically starting with lowest-indexed -// physical core, as reported by cAdvisor. -// -// - ASSIGNABLE: Equal to SHARED - RESERVED. Exclusive CPUs are allocated -// from this pool. -// -// - EXCLUSIVE ALLOCATIONS: CPU sets assigned exclusively to one container. -// These are stored as explicit assignments in the state. -// -// When an exclusive allocation is made, the static policy also updates the -// default cpuset in the state abstraction. The CPU manager's periodic -// reconcile loop takes care of rewriting the cpuset in cgroupfs for any -// containers that may be running in the shared pool. For this reason, -// applications running within exclusively-allocated containers must tolerate -// potentially sharing their allocated CPUs for up to the CPU manager -// reconcile period. -type staticPolicy struct { - // cpu socket topology - topology *topology.CPUTopology - // set of CPUs that is not available for exclusive assignment - reservedCPUs cpuset.CPUSet - // Superset of reservedCPUs. It includes not just the reservedCPUs themselves, - // but also any siblings of those reservedCPUs on the same physical die. - // NOTE: If the reserved set includes full physical CPUs from the beginning - // (e.g. only reserved pairs of core siblings) this set is expected to be - // identical to the reserved set. - reservedPhysicalCPUs cpuset.CPUSet - // topology manager reference to get container Topology affinity - affinity topologymanager.Store - // set of CPUs to reuse across allocations in a pod - cpusToReuse map[string]cpuset.CPUSet - // options allow to fine-tune the behaviour of the policy - options StaticPolicyOptions - // we compute this value multiple time, and it's not supposed to change - // at runtime - the cpumanager can't deal with runtime topology changes anyway. - cpuGroupSize int -} - -// Ensure staticPolicy implements Policy interface -var _ Policy = &staticPolicy{} - -// NewStaticPolicy returns a CPU manager policy that does not change CPU -// assignments for exclusively pinned guaranteed containers after the main -// container process starts. -func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string) (Policy, error) { - opts, err := NewStaticPolicyOptions(cpuPolicyOptions) - if err != nil { - return nil, err - } - err = ValidateStaticPolicyOptions(opts, topology, affinity) - if err != nil { - return nil, err - } - - cpuGroupSize := topology.CPUsPerCore() - klog.InfoS("Static policy created with configuration", "options", opts, "cpuGroupSize", cpuGroupSize) - - policy := &staticPolicy{ - topology: topology, - affinity: affinity, - cpusToReuse: make(map[string]cpuset.CPUSet), - options: opts, - cpuGroupSize: cpuGroupSize, - } - - allCPUs := topology.CPUDetails.CPUs() - var reserved cpuset.CPUSet - if reservedCPUs.Size() > 0 { - reserved = reservedCPUs - } else { - // takeByTopology allocates CPUs associated with low-numbered cores from - // allCPUs. 
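
As a side note on the pool taxonomy described in the staticPolicy comment above: the relationships between the pools are plain cpuset arithmetic. A toy recomputation with made-up CPU IDs:

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	// Hypothetical 8-CPU box, 2 CPUs reserved, one exclusive allocation.
	all := cpuset.New(0, 1, 2, 3, 4, 5, 6, 7)
	reserved := cpuset.New(0, 4)
	exclusive := cpuset.New(1, 5) // pinned to some guaranteed container

	shared := all.Difference(exclusive) // the "default" cpuset in the state
	assignable := shared.Difference(reserved)

	fmt.Println("shared:    ", shared.String())     // 0,2-4,6-7
	fmt.Println("assignable:", assignable.String()) // 2-3,6-7
}
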
- // - // For example: Given a system with 8 CPUs available and HT enabled, - // if numReservedCPUs=2, then reserved={0,4} - reserved, _ = policy.takeByTopology(allCPUs, numReservedCPUs) - } - - if reserved.Size() != numReservedCPUs { - err := fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs) - return nil, err - } - - var reservedPhysicalCPUs cpuset.CPUSet - for _, cpu := range reserved.UnsortedList() { - core, err := topology.CPUCoreID(cpu) - if err != nil { - return nil, fmt.Errorf("[cpumanager] unable to build the reserved physical CPUs from the reserved set: %w", err) - } - reservedPhysicalCPUs = reservedPhysicalCPUs.Union(topology.CPUDetails.CPUsInCores(core)) - } - - klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved, "reservedPhysicalCPUs", reservedPhysicalCPUs) - policy.reservedCPUs = reserved - policy.reservedPhysicalCPUs = reservedPhysicalCPUs - - return policy, nil -} - -func (p *staticPolicy) Name() string { - return string(PolicyStatic) -} - -func (p *staticPolicy) Start(s state.State) error { - if err := p.validateState(s); err != nil { - klog.ErrorS(err, "Static policy invalid state, please drain node and remove policy state file") - return err - } - p.initializeMetrics(s) - return nil -} - -func (p *staticPolicy) validateState(s state.State) error { - tmpAssignments := s.GetCPUAssignments() - tmpDefaultCPUset := s.GetDefaultCPUSet() - - allCPUs := p.topology.CPUDetails.CPUs() - if p.options.StrictCPUReservation { - allCPUs = allCPUs.Difference(p.reservedCPUs) - } - - // Default cpuset cannot be empty when assignments exist - if tmpDefaultCPUset.IsEmpty() { - if len(tmpAssignments) != 0 { - return fmt.Errorf("default cpuset cannot be empty") - } - // state is empty initialize - s.SetDefaultCPUSet(allCPUs) - klog.InfoS("Static policy initialized", "defaultCPUSet", allCPUs) - return nil - } - - // State has already been initialized from file (is not empty) - // 1. Check if the reserved cpuset is not part of default cpuset because: - // - kube/system reserved have changed (increased) - may lead to some containers not being able to start - // - user tampered with file - if p.options.StrictCPUReservation { - if !p.reservedCPUs.Intersection(tmpDefaultCPUset).IsEmpty() { - return fmt.Errorf("some of strictly reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"", - p.reservedCPUs.Intersection(tmpDefaultCPUset).String(), tmpDefaultCPUset.String()) - } - } else { - if !p.reservedCPUs.Intersection(tmpDefaultCPUset).Equals(p.reservedCPUs) { - return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"", - p.reservedCPUs.String(), tmpDefaultCPUset.String()) - } - } - - // 2. Check if state for static policy is consistent - for pod := range tmpAssignments { - for container, cset := range tmpAssignments[pod] { - // None of the cpu in DEFAULT cset should be in s.assignments - if !tmpDefaultCPUset.Intersection(cset).IsEmpty() { - return fmt.Errorf("pod: %s, container: %s cpuset: \"%s\" overlaps with default cpuset \"%s\"", - pod, container, cset.String(), tmpDefaultCPUset.String()) - } - } - } - - // 3. It's possible that the set of available CPUs has changed since - // the state was written. This can be due to for example - // offlining a CPU when kubelet is not running. If this happens, - // CPU manager will run into trouble when later it tries to - // assign non-existent CPUs to containers. 
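
The reservedPhysicalCPUs computation above widens the reserved set to whole cores. A sketch with a hypothetical two-core HT topology, where the maps stand in for the topology.CPUDetails lookups:

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	// Hypothetical HT topology: core ID -> its two hyperthread siblings.
	cpusInCore := map[int]cpuset.CPUSet{
		0: cpuset.New(0, 4),
		1: cpuset.New(1, 5),
	}
	coreOf := map[int]int{0: 0, 4: 0, 1: 1, 5: 1}

	reserved := cpuset.New(0) // only one sibling of core 0 is reserved
	reservedPhysical := cpuset.New()
	for _, cpu := range reserved.UnsortedList() {
		// Expanding to full cores is what keeps full-pcpus-only sound.
		reservedPhysical = reservedPhysical.Union(cpusInCore[coreOf[cpu]])
	}
	fmt.Println(reservedPhysical.String()) // "0,4"
}
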
Validate that the - // topology that was received during CPU manager startup matches with - // the set of CPUs stored in the state. - totalKnownCPUs := tmpDefaultCPUset.Clone() - tmpCPUSets := []cpuset.CPUSet{} - for pod := range tmpAssignments { - for _, cset := range tmpAssignments[pod] { - tmpCPUSets = append(tmpCPUSets, cset) - } - } - totalKnownCPUs = totalKnownCPUs.Union(tmpCPUSets...) - if !totalKnownCPUs.Equals(allCPUs) { - return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"", - allCPUs.String(), totalKnownCPUs.String()) - - } - - return nil -} - -// GetAllocatableCPUs returns the total set of CPUs available for allocation. -func (p *staticPolicy) GetAllocatableCPUs(s state.State) cpuset.CPUSet { - return p.topology.CPUDetails.CPUs().Difference(p.reservedCPUs) -} - -// GetAvailableCPUs returns the set of unassigned CPUs minus the reserved set. -func (p *staticPolicy) GetAvailableCPUs(s state.State) cpuset.CPUSet { - return s.GetDefaultCPUSet().Difference(p.reservedCPUs) -} - -func (p *staticPolicy) GetAvailablePhysicalCPUs(s state.State) cpuset.CPUSet { - return s.GetDefaultCPUSet().Difference(p.reservedPhysicalCPUs) -} - -func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, cset cpuset.CPUSet) { - // If pod entries to m.cpusToReuse other than the current pod exist, delete them. - for podUID := range p.cpusToReuse { - if podUID != string(pod.UID) { - delete(p.cpusToReuse, podUID) - } - } - // If no cpuset exists for cpusToReuse by this pod yet, create one. - if _, ok := p.cpusToReuse[string(pod.UID)]; !ok { - p.cpusToReuse[string(pod.UID)] = cpuset.New() - } - // Check if the container is an init container. - // If so, add its cpuset to the cpuset of reusable CPUs for any new allocations. - for _, initContainer := range pod.Spec.InitContainers { - if container.Name == initContainer.Name { - if podutil.IsRestartableInitContainer(&initContainer) { - // If the container is a restartable init container, we should not - // reuse its cpuset, as a restartable init container can run with - // regular containers. - break - } - p.cpusToReuse[string(pod.UID)] = p.cpusToReuse[string(pod.UID)].Union(cset) - return - } - } - // Otherwise it is an app container. - // Remove its cpuset from the cpuset of reusable CPUs for any new allocations. 
- p.cpusToReuse[string(pod.UID)] = p.cpusToReuse[string(pod.UID)].Difference(cset) -} - -func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) { - numCPUs := p.guaranteedCPUs(pod, container) - if numCPUs == 0 { - // container belongs in the shared pool (nothing to do; use default cpuset) - return nil - } - - klog.InfoS("Static policy: Allocate", "pod", klog.KObj(pod), "containerName", container.Name) - // container belongs in an exclusively allocated pool - metrics.CPUManagerPinningRequestsTotal.Inc() - defer func() { - if rerr != nil { - metrics.CPUManagerPinningErrorsTotal.Inc() - if p.options.FullPhysicalCPUsOnly { - metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Inc() - } - return - } - // TODO: move in updateMetricsOnAllocate - if p.options.FullPhysicalCPUsOnly { - // increment only if we know we allocate aligned resources - metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Inc() - } - }() - - if p.options.FullPhysicalCPUsOnly { - if (numCPUs % p.cpuGroupSize) != 0 { - // Since CPU Manager has been enabled requesting strict SMT alignment, it means a guaranteed pod can only be admitted - // if the CPU requested is a multiple of the number of virtual cpus per physical cores. - // In case CPU request is not a multiple of the number of virtual cpus per physical cores the Pod will be put - // in Failed state, with SMTAlignmentError as reason. Since the allocation happens in terms of physical cores - // and the scheduler is responsible for ensuring that the workload goes to a node that has enough CPUs, - // the pod would be placed on a node where there are enough physical cores available to be allocated. - // Just like the behaviour in case of static policy, takeByTopology will try to first allocate CPUs from the same socket - // and only in case the request cannot be sattisfied on a single socket, CPU allocation is done for a workload to occupy all - // CPUs on a physical core. Allocation of individual threads would never have to occur. - return SMTAlignmentError{ - RequestedCPUs: numCPUs, - CpusPerCore: p.cpuGroupSize, - CausedByPhysicalCPUs: false, - } - } - - availablePhysicalCPUs := p.GetAvailablePhysicalCPUs(s).Size() - - // It's legal to reserve CPUs which are not core siblings. In this case the CPU allocator can descend to single cores - // when picking CPUs. This will void the guarantee of FullPhysicalCPUsOnly. To prevent this, we need to additionally consider - // all the core siblings of the reserved CPUs as unavailable when computing the free CPUs, before to start the actual allocation. - // This way, by construction all possible CPUs allocation whose number is multiple of the SMT level are now correct again. - if numCPUs > availablePhysicalCPUs { - return SMTAlignmentError{ - RequestedCPUs: numCPUs, - CpusPerCore: p.cpuGroupSize, - AvailablePhysicalCPUs: availablePhysicalCPUs, - CausedByPhysicalCPUs: true, - } - } - } - if cset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok { - p.updateCPUsToReuse(pod, container, cset) - klog.InfoS("Static policy: container already present in state, skipping", "pod", klog.KObj(pod), "containerName", container.Name) - return nil - } - - // Call Topology Manager to get the aligned socket affinity across all hint providers. 
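
The full-pcpus-only admission gate above reduces to a divisibility check against the SMT level. Sketch:

package main

import "fmt"

// smtAligned reports whether a guaranteed CPU request can be satisfied with
// whole physical cores, mirroring the full-pcpus-only admission check: the
// request must be a multiple of the SMT level (virtual CPUs per core).
func smtAligned(requestedCPUs, cpusPerCore int) bool {
	return requestedCPUs%cpusPerCore == 0
}

func main() {
	for _, req := range []int{2, 3, 4} {
		fmt.Printf("request %d, SMT 2 -> aligned=%v\n", req, smtAligned(req, 2))
	}
}
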
- hint := p.affinity.GetAffinity(string(pod.UID), container.Name) - klog.InfoS("Topology Affinity", "pod", klog.KObj(pod), "containerName", container.Name, "affinity", hint) - - // Allocate CPUs according to the NUMA affinity contained in the hint. - cpuAllocation, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity, p.cpusToReuse[string(pod.UID)]) - if err != nil { - klog.ErrorS(err, "Unable to allocate CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "numCPUs", numCPUs) - return err - } - - s.SetCPUSet(string(pod.UID), container.Name, cpuAllocation.CPUs) - p.updateCPUsToReuse(pod, container, cpuAllocation.CPUs) - p.updateMetricsOnAllocate(s, cpuAllocation) - - klog.V(4).InfoS("Allocated exclusive CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "cpuset", cpuAllocation.CPUs.String()) - return nil -} - -// getAssignedCPUsOfSiblings returns assigned cpus of given container's siblings(all containers other than the given container) in the given pod `podUID`. -func getAssignedCPUsOfSiblings(s state.State, podUID string, containerName string) cpuset.CPUSet { - assignments := s.GetCPUAssignments() - cset := cpuset.New() - for name, cpus := range assignments[podUID] { - if containerName == name { - continue - } - cset = cset.Union(cpus) - } - return cset -} - -func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) error { - klog.InfoS("Static policy: RemoveContainer", "podUID", podUID, "containerName", containerName) - cpusInUse := getAssignedCPUsOfSiblings(s, podUID, containerName) - if toRelease, ok := s.GetCPUSet(podUID, containerName); ok { - s.Delete(podUID, containerName) - // Mutate the shared pool, adding released cpus. - toRelease = toRelease.Difference(cpusInUse) - s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease)) - p.updateMetricsOnRelease(s, toRelease) - - } - return nil -} - -func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (topology.Allocation, error) { - klog.InfoS("AllocateCPUs", "numCPUs", numCPUs, "socket", numaAffinity) - - allocatableCPUs := p.GetAvailableCPUs(s).Union(reusableCPUs) - - // If there are aligned CPUs in numaAffinity, attempt to take those first. - result := topology.EmptyAllocation() - if numaAffinity != nil { - alignedCPUs := p.getAlignedCPUs(numaAffinity, allocatableCPUs) - - numAlignedToAlloc := alignedCPUs.Size() - if numCPUs < numAlignedToAlloc { - numAlignedToAlloc = numCPUs - } - - allocatedCPUs, err := p.takeByTopology(alignedCPUs, numAlignedToAlloc) - if err != nil { - return topology.EmptyAllocation(), err - } - - result.CPUs = result.CPUs.Union(allocatedCPUs) - } - - // Get any remaining CPUs from what's leftover after attempting to grab aligned ones. - remainingCPUs, err := p.takeByTopology(allocatableCPUs.Difference(result.CPUs), numCPUs-result.CPUs.Size()) - if err != nil { - return topology.EmptyAllocation(), err - } - result.CPUs = result.CPUs.Union(remainingCPUs) - result.Aligned = p.topology.CheckAlignment(result.CPUs) - - // Remove allocated CPUs from the shared CPUSet. 
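
allocateCPUs above is a two-phase pick: take as much as possible from the NUMA-aligned CPUs first, then top up from everything else. A simplified sketch where take() is a naive stand-in for the real takeByTopology allocator:

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

// take picks the n lowest-numbered CPUs from a set (stand-in only; the real
// allocator is topology-aware).
func take(from cpuset.CPUSet, n int) cpuset.CPUSet {
	picked := from.List() // sorted
	if n > len(picked) {
		n = len(picked)
	}
	return cpuset.New(picked[:n]...)
}

func main() {
	allocatable := cpuset.New(0, 1, 2, 3, 4, 5, 6, 7)
	aligned := cpuset.New(4, 5, 6, 7) // CPUs on the hinted NUMA node
	numCPUs := 6

	nAligned := numCPUs
	if aligned.Size() < nAligned {
		nAligned = aligned.Size()
	}
	result := take(aligned, nAligned)
	result = result.Union(take(allocatable.Difference(result), numCPUs-result.Size()))
	fmt.Println(result.String()) // "0-1,4-7"
}
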
- s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result.CPUs)) - - klog.InfoS("AllocateCPUs", "result", result.String()) - return result, nil -} - -func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int { - qos := v1qos.GetPodQOS(pod) - if qos != v1.PodQOSGuaranteed { - klog.V(5).InfoS("Exclusive CPU allocation skipped, pod QoS is not guaranteed", "pod", klog.KObj(pod), "containerName", container.Name, "qos", qos) - return 0 - } - cpuQuantity := container.Resources.Requests[v1.ResourceCPU] - // In-place pod resize feature makes Container.Resources field mutable for CPU & memory. - // AllocatedResources holds the value of Container.Resources.Requests when the pod was admitted. - // We should return this value because this is what kubelet agreed to allocate for the container - // and the value configured with runtime. - if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - containerStatuses := pod.Status.ContainerStatuses - if podutil.IsRestartableInitContainer(container) { - if len(pod.Status.InitContainerStatuses) != 0 { - containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...) - } - } - if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok { - cpuQuantity = cs.AllocatedResources[v1.ResourceCPU] - } - } - cpuValue := cpuQuantity.Value() - if cpuValue*1000 != cpuQuantity.MilliValue() { - klog.V(5).InfoS("Exclusive CPU allocation skipped, pod requested non-integral CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "cpu", cpuValue) - return 0 - } - // Safe downcast to do for all systems with < 2.1 billion CPUs. - // Per the language spec, `int` is guaranteed to be at least 32 bits wide. - // https://golang.org/ref/spec#Numeric_types - return int(cpuQuantity.Value()) -} - -func (p *staticPolicy) podGuaranteedCPUs(pod *v1.Pod) int { - // The maximum of requested CPUs by init containers. - requestedByInitContainers := 0 - requestedByRestartableInitContainers := 0 - for _, container := range pod.Spec.InitContainers { - if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok { - continue - } - requestedCPU := p.guaranteedCPUs(pod, &container) - // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission - // for the detail. - if podutil.IsRestartableInitContainer(&container) { - requestedByRestartableInitContainers += requestedCPU - } else if requestedByRestartableInitContainers+requestedCPU > requestedByInitContainers { - requestedByInitContainers = requestedByRestartableInitContainers + requestedCPU - } - } - - // The sum of requested CPUs by app containers. 
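
guaranteedCPUs above filters out fractional requests with the Value()*1000 == MilliValue() trick; resource.Quantity.Value() rounds up, so any milli-remainder breaks the equality. A quick demonstration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// integralCPUs mirrors the guaranteedCPUs test: a request is only eligible
// for exclusive pinning when it is a whole number of CPUs.
func integralCPUs(q resource.Quantity) bool {
	return q.Value()*1000 == q.MilliValue()
}

func main() {
	for _, s := range []string{"2", "1500m", "1000m"} {
		q := resource.MustParse(s)
		fmt.Printf("%-6s integral=%v\n", s, integralCPUs(q)) // 1500m is rejected
	}
}
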
- requestedByAppContainers := 0 - for _, container := range pod.Spec.Containers { - if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok { - continue - } - requestedByAppContainers += p.guaranteedCPUs(pod, &container) - } - - requestedByLongRunningContainers := requestedByAppContainers + requestedByRestartableInitContainers - if requestedByInitContainers > requestedByLongRunningContainers { - return requestedByInitContainers - } - return requestedByLongRunningContainers -} - -func (p *staticPolicy) takeByTopology(availableCPUs cpuset.CPUSet, numCPUs int) (cpuset.CPUSet, error) { - cpuSortingStrategy := CPUSortingStrategyPacked - if p.options.DistributeCPUsAcrossCores { - cpuSortingStrategy = CPUSortingStrategySpread - } - - if p.options.DistributeCPUsAcrossNUMA { - cpuGroupSize := 1 - if p.options.FullPhysicalCPUsOnly { - cpuGroupSize = p.cpuGroupSize - } - return takeByTopologyNUMADistributed(p.topology, availableCPUs, numCPUs, cpuGroupSize, cpuSortingStrategy) - } - - return takeByTopologyNUMAPacked(p.topology, availableCPUs, numCPUs, cpuSortingStrategy, p.options.PreferAlignByUncoreCacheOption) -} - -func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - // Get a count of how many guaranteed CPUs have been requested. - requested := p.guaranteedCPUs(pod, container) - - // Number of required CPUs is not an integer or a container is not part of the Guaranteed QoS class. - // It will be treated by the TopologyManager as having no preference and cause it to ignore this - // resource when considering pod alignment. - // In terms of hints, this is equal to: TopologyHints[NUMANodeAffinity: nil, Preferred: true]. - if requested == 0 { - return nil - } - - // Short circuit to regenerate the same hints if there are already - // guaranteed CPUs allocated to the Container. This might happen after a - // kubelet restart, for example. - if allocated, exists := s.GetCPUSet(string(pod.UID), container.Name); exists { - if allocated.Size() != requested { - klog.InfoS("CPUs already allocated to container with different number than request", "pod", klog.KObj(pod), "containerName", container.Name, "requestedSize", requested, "allocatedSize", allocated.Size()) - // An empty list of hints will be treated as a preference that cannot be satisfied. - // In definition of hints this is equal to: TopologyHint[NUMANodeAffinity: nil, Preferred: false]. - // For all but the best-effort policy, the Topology Manager will throw a pod-admission error. - return map[string][]topologymanager.TopologyHint{ - string(v1.ResourceCPU): {}, - } - } - klog.InfoS("Regenerating TopologyHints for CPUs already allocated", "pod", klog.KObj(pod), "containerName", container.Name) - return map[string][]topologymanager.TopologyHint{ - string(v1.ResourceCPU): p.generateCPUTopologyHints(allocated, cpuset.CPUSet{}, requested), - } - } - - // Get a list of available CPUs. - available := p.GetAvailableCPUs(s) - - // Get a list of reusable CPUs (e.g. CPUs reused from initContainers). - // It should be an empty CPUSet for a newly created pod. - reusable := p.cpusToReuse[string(pod.UID)] - - // Generate hints. 
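
The pod-level request computed by podGuaranteedCPUs is the larger of the init-time peak and the long-running total (app containers plus restartable sidecars). A condensed sketch of that rule:

package main

import "fmt"

type initCtr struct {
	cpus    int
	sidecar bool // restartable init container
}

// effectiveCPUs condenses the podGuaranteedCPUs rule: regular init containers
// run after earlier sidecars are already up, so the init-time peak includes
// the sidecars started so far; the result is the max of that peak and the
// long-running total (apps + all sidecars).
func effectiveCPUs(inits []initCtr, appCPUs []int) int {
	byInit, bySidecars := 0, 0
	for _, c := range inits {
		if c.sidecar {
			bySidecars += c.cpus
			continue
		}
		if bySidecars+c.cpus > byInit {
			byInit = bySidecars + c.cpus
		}
	}
	apps := 0
	for _, n := range appCPUs {
		apps += n
	}
	if long := apps + bySidecars; long > byInit {
		return long
	}
	return byInit
}

func main() {
	// sidecar(1) then init(4): init peak = 1+4 = 5; long-running = 2+1 = 3.
	fmt.Println(effectiveCPUs([]initCtr{{1, true}, {4, false}}, []int{2})) // 5
}
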
- cpuHints := p.generateCPUTopologyHints(available, reusable, requested) - klog.InfoS("TopologyHints generated", "pod", klog.KObj(pod), "containerName", container.Name, "cpuHints", cpuHints) - - return map[string][]topologymanager.TopologyHint{ - string(v1.ResourceCPU): cpuHints, - } -} - -func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint { - // Get a count of how many guaranteed CPUs have been requested by Pod. - requested := p.podGuaranteedCPUs(pod) - - // Number of required CPUs is not an integer or a pod is not part of the Guaranteed QoS class. - // It will be treated by the TopologyManager as having no preference and cause it to ignore this - // resource when considering pod alignment. - // In terms of hints, this is equal to: TopologyHints[NUMANodeAffinity: nil, Preferred: true]. - if requested == 0 { - return nil - } - - assignedCPUs := cpuset.New() - for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) { - requestedByContainer := p.guaranteedCPUs(pod, &container) - // Short circuit to regenerate the same hints if there are already - // guaranteed CPUs allocated to the Container. This might happen after a - // kubelet restart, for example. - if allocated, exists := s.GetCPUSet(string(pod.UID), container.Name); exists { - if allocated.Size() != requestedByContainer { - klog.InfoS("CPUs already allocated to container with different number than request", "pod", klog.KObj(pod), "containerName", container.Name, "allocatedSize", requested, "requestedByContainer", requestedByContainer, "allocatedSize", allocated.Size()) - // An empty list of hints will be treated as a preference that cannot be satisfied. - // In definition of hints this is equal to: TopologyHint[NUMANodeAffinity: nil, Preferred: false]. - // For all but the best-effort policy, the Topology Manager will throw a pod-admission error. - return map[string][]topologymanager.TopologyHint{ - string(v1.ResourceCPU): {}, - } - } - // A set of CPUs already assigned to containers in this pod - assignedCPUs = assignedCPUs.Union(allocated) - } - } - if assignedCPUs.Size() == requested { - klog.InfoS("Regenerating TopologyHints for CPUs already allocated", "pod", klog.KObj(pod)) - return map[string][]topologymanager.TopologyHint{ - string(v1.ResourceCPU): p.generateCPUTopologyHints(assignedCPUs, cpuset.CPUSet{}, requested), - } - } - - // Get a list of available CPUs. - available := p.GetAvailableCPUs(s) - - // Get a list of reusable CPUs (e.g. CPUs reused from initContainers). - // It should be an empty CPUSet for a newly created pod. - reusable := p.cpusToReuse[string(pod.UID)] - - // Ensure any CPUs already assigned to containers in this pod are included as part of the hint generation. - reusable = reusable.Union(assignedCPUs) - - // Generate hints. - cpuHints := p.generateCPUTopologyHints(available, reusable, requested) - klog.InfoS("TopologyHints generated", "pod", klog.KObj(pod), "cpuHints", cpuHints) - - return map[string][]topologymanager.TopologyHint{ - string(v1.ResourceCPU): cpuHints, - } -} - -// generateCPUTopologyHints generates a set of TopologyHints given the set of -// available CPUs and the number of CPUs being requested. -// -// It follows the convention of marking all hints that have the same number of -// bits set as the narrowest matching NUMANodeAffinity with 'Preferred: true', and -// marking all others with 'Preferred: false'. 
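
The narrowest-affinity preference rule described above can be shown with plain bitmasks; here a uint64 stands in for the topology manager's BitMask type:

package main

import (
	"fmt"
	"math/bits"
)

// hint pairs a NUMA-node mask (bit i set = node i included) with the
// preferred flag the topology manager consumes.
type hint struct {
	mask      uint64
	preferred bool
}

// markPreferred applies the rule: among all masks that can hold the request,
// only those as narrow as the narrowest one are preferred.
func markPreferred(hints []hint) {
	minSize := 64
	for _, h := range hints {
		if n := bits.OnesCount64(h.mask); n < minSize {
			minSize = n
		}
	}
	for i := range hints {
		hints[i].preferred = bits.OnesCount64(hints[i].mask) == minSize
	}
}

func main() {
	hints := []hint{{mask: 0b01}, {mask: 0b10}, {mask: 0b11}}
	markPreferred(hints)
	for _, h := range hints {
		fmt.Printf("mask=%02b preferred=%v\n", h.mask, h.preferred)
	}
}
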
-func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reusableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint { - // Initialize minAffinitySize to include all NUMA Nodes. - minAffinitySize := p.topology.CPUDetails.NUMANodes().Size() - - // Iterate through all combinations of numa nodes bitmask and build hints from them. - hints := []topologymanager.TopologyHint{} - bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().List(), func(mask bitmask.BitMask) { - // First, update minAffinitySize for the current request size. - cpusInMask := p.topology.CPUDetails.CPUsInNUMANodes(mask.GetBits()...).Size() - if cpusInMask >= request && mask.Count() < minAffinitySize { - minAffinitySize = mask.Count() - } - - // Then check to see if we have enough CPUs available on the current - // numa node bitmask to satisfy the CPU request. - numMatching := 0 - for _, c := range reusableCPUs.List() { - // Disregard this mask if its NUMANode isn't part of it. - if !mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) { - return - } - numMatching++ - } - - // Finally, check to see if enough available CPUs remain on the current - // NUMA node combination to satisfy the CPU request. - for _, c := range availableCPUs.List() { - if mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) { - numMatching++ - } - } - - // If they don't, then move onto the next combination. - if numMatching < request { - return - } - - // Otherwise, create a new hint from the numa node bitmask and add it to the - // list of hints. We set all hint preferences to 'false' on the first - // pass through. - hints = append(hints, topologymanager.TopologyHint{ - NUMANodeAffinity: mask, - Preferred: false, - }) - }) - - // Loop back through all hints and update the 'Preferred' field based on - // counting the number of bits sets in the affinity mask and comparing it - // to the minAffinitySize. Only those with an equal number of bits set (and - // with a minimal set of numa nodes) will be considered preferred. - for i := range hints { - if p.options.AlignBySocket && p.isHintSocketAligned(hints[i], minAffinitySize) { - hints[i].Preferred = true - continue - } - if hints[i].NUMANodeAffinity.Count() == minAffinitySize { - hints[i].Preferred = true - } - } - - return hints -} - -// isHintSocketAligned function return true if numa nodes in hint are socket aligned. -func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, minAffinitySize int) bool { - numaNodesBitMask := hint.NUMANodeAffinity.GetBits() - numaNodesPerSocket := p.topology.NumNUMANodes / p.topology.NumSockets - if numaNodesPerSocket == 0 { - return false - } - // minSockets refers to minimum number of socket required to satify allocation. - // A hint is considered socket aligned if sockets across which numa nodes span is equal to minSockets - minSockets := (minAffinitySize + numaNodesPerSocket - 1) / numaNodesPerSocket - return p.topology.CPUDetails.SocketsInNUMANodes(numaNodesBitMask...).Size() == minSockets -} - -// getAlignedCPUs return set of aligned CPUs based on numa affinity mask and configured policy options. -func (p *staticPolicy) getAlignedCPUs(numaAffinity bitmask.BitMask, allocatableCPUs cpuset.CPUSet) cpuset.CPUSet { - alignedCPUs := cpuset.New() - numaBits := numaAffinity.GetBits() - - // If align-by-socket policy option is enabled, NUMA based hint is expanded to - // socket aligned hint. 
It will ensure that first socket aligned available CPUs are - // allocated before we try to find CPUs across socket to satisfy allocation request. - if p.options.AlignBySocket { - socketBits := p.topology.CPUDetails.SocketsInNUMANodes(numaBits...).UnsortedList() - for _, socketID := range socketBits { - alignedCPUs = alignedCPUs.Union(allocatableCPUs.Intersection(p.topology.CPUDetails.CPUsInSockets(socketID))) - } - return alignedCPUs - } - - for _, numaNodeID := range numaBits { - alignedCPUs = alignedCPUs.Union(allocatableCPUs.Intersection(p.topology.CPUDetails.CPUsInNUMANodes(numaNodeID))) - } - - return alignedCPUs -} - -func (p *staticPolicy) initializeMetrics(s state.State) { - metrics.CPUManagerSharedPoolSizeMilliCores.Set(float64(p.GetAvailableCPUs(s).Size() * 1000)) - metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Add(0) // ensure the value exists - metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Add(0) // ensure the value exists - metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedUncoreCache).Add(0) // ensure the value exists - totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s) - metrics.CPUManagerExclusiveCPUsAllocationCount.Set(float64(totalAssignedCPUs.Size())) - updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs) -} - -func (p *staticPolicy) updateMetricsOnAllocate(s state.State, cpuAlloc topology.Allocation) { - ncpus := cpuAlloc.CPUs.Size() - metrics.CPUManagerExclusiveCPUsAllocationCount.Add(float64(ncpus)) - metrics.CPUManagerSharedPoolSizeMilliCores.Add(float64(-ncpus * 1000)) - if cpuAlloc.Aligned.UncoreCache { - metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedUncoreCache).Inc() - } - totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s) - updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs) -} - -func (p *staticPolicy) updateMetricsOnRelease(s state.State, cset cpuset.CPUSet) { - ncpus := cset.Size() - metrics.CPUManagerExclusiveCPUsAllocationCount.Add(float64(-ncpus)) - metrics.CPUManagerSharedPoolSizeMilliCores.Add(float64(ncpus * 1000)) - totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s) - updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs.Difference(cset)) -} - -func getTotalAssignedExclusiveCPUs(s state.State) cpuset.CPUSet { - totalAssignedCPUs := cpuset.New() - for _, assignment := range s.GetCPUAssignments() { - for _, cset := range assignment { - totalAssignedCPUs = totalAssignedCPUs.Union(cset) - } - - } - return totalAssignedCPUs -} - -func updateAllocationPerNUMAMetric(topo *topology.CPUTopology, allocatedCPUs cpuset.CPUSet) { - numaCount := make(map[int]int) - - // Count CPUs allocated per NUMA node - for _, cpuID := range allocatedCPUs.UnsortedList() { - numaNode, err := topo.CPUNUMANodeID(cpuID) - if err != nil { - //NOTE: We are logging the error but it is highly unlikely to happen as the CPUset - // is already computed, evaluated and there is no room for user tampering. 
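
The per-NUMA metric update just below is a simple group-and-count over the allocated CPU IDs. Sketch with a made-up CPU-to-NUMA lookup:

package main

import "fmt"

func main() {
	// numaOf is a hypothetical topology lookup (CPU ID -> NUMA node).
	numaOf := map[int]int{0: 0, 1: 0, 2: 1, 3: 1}
	allocated := []int{0, 2, 3}

	numaCount := map[int]int{}
	for _, cpu := range allocated {
		numaCount[numaOf[cpu]]++
	}
	fmt.Println(numaCount) // map[0:1 1:2]
}
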
- klog.ErrorS(err, "Unable to determine NUMA node", "cpuID", cpuID) - } - numaCount[numaNode]++ - } - - // Update metric - for numaNode, count := range numaCount { - metrics.CPUManagerAllocationPerNUMA.WithLabelValues(strconv.Itoa(numaNode)).Set(float64(count)) - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go deleted file mode 100644 index 564c3482c04..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "encoding/json" - "fmt" - "hash/fnv" - "strings" - - "k8s.io/apimachinery/pkg/util/dump" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" -) - -var _ checkpointmanager.Checkpoint = &CPUManagerCheckpointV1{} -var _ checkpointmanager.Checkpoint = &CPUManagerCheckpointV2{} -var _ checkpointmanager.Checkpoint = &CPUManagerCheckpoint{} - -// CPUManagerCheckpoint struct is used to store cpu/pod assignments in a checkpoint in v2 format -type CPUManagerCheckpoint struct { - PolicyName string `json:"policyName"` - DefaultCPUSet string `json:"defaultCpuSet"` - Entries map[string]map[string]string `json:"entries,omitempty"` - Checksum checksum.Checksum `json:"checksum"` -} - -// CPUManagerCheckpointV1 struct is used to store cpu/pod assignments in a checkpoint in v1 format -type CPUManagerCheckpointV1 struct { - PolicyName string `json:"policyName"` - DefaultCPUSet string `json:"defaultCpuSet"` - Entries map[string]string `json:"entries,omitempty"` - Checksum checksum.Checksum `json:"checksum"` -} - -// CPUManagerCheckpointV2 struct is used to store cpu/pod assignments in a checkpoint in v2 format -type CPUManagerCheckpointV2 = CPUManagerCheckpoint - -// NewCPUManagerCheckpoint returns an instance of Checkpoint -func NewCPUManagerCheckpoint() *CPUManagerCheckpoint { - //nolint:staticcheck // unexported-type-in-api user-facing error message - return newCPUManagerCheckpointV2() -} - -func newCPUManagerCheckpointV1() *CPUManagerCheckpointV1 { - return &CPUManagerCheckpointV1{ - Entries: make(map[string]string), - } -} - -func newCPUManagerCheckpointV2() *CPUManagerCheckpointV2 { - return &CPUManagerCheckpointV2{ - Entries: make(map[string]map[string]string), - } -} - -// MarshalCheckpoint returns marshalled checkpoint in v1 format -func (cp *CPUManagerCheckpointV1) MarshalCheckpoint() ([]byte, error) { - // make sure checksum wasn't set before so it doesn't affect output checksum - cp.Checksum = 0 - cp.Checksum = checksum.New(cp) - return json.Marshal(*cp) -} - -// MarshalCheckpoint returns marshalled checkpoint in v2 format -func (cp *CPUManagerCheckpointV2) MarshalCheckpoint() ([]byte, error) { - // make sure checksum wasn't set before so it doesn't affect output checksum - cp.Checksum = 
0 - cp.Checksum = checksum.New(cp) - return json.Marshal(*cp) -} - -// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint in v1 format -func (cp *CPUManagerCheckpointV1) UnmarshalCheckpoint(blob []byte) error { - return json.Unmarshal(blob, cp) -} - -// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint in v2 format -func (cp *CPUManagerCheckpointV2) UnmarshalCheckpoint(blob []byte) error { - return json.Unmarshal(blob, cp) -} - -// VerifyChecksum verifies that current checksum of checkpoint is valid in v1 format -func (cp *CPUManagerCheckpointV1) VerifyChecksum() error { - if cp.Checksum == 0 { - // accept empty checksum for compatibility with old file backend - return nil - } - - ck := cp.Checksum - cp.Checksum = 0 - object := dump.ForHash(cp) - object = strings.Replace(object, "CPUManagerCheckpointV1", "CPUManagerCheckpoint", 1) - cp.Checksum = ck - - hash := fnv.New32a() - fmt.Fprintf(hash, "%v", object) - actualCS := checksum.Checksum(hash.Sum32()) - if cp.Checksum != actualCS { - return &errors.CorruptCheckpointError{ - ActualCS: uint64(actualCS), - ExpectedCS: uint64(cp.Checksum), - } - } - - return nil -} - -// VerifyChecksum verifies that current checksum of checkpoint is valid in v2 format -func (cp *CPUManagerCheckpointV2) VerifyChecksum() error { - if cp.Checksum == 0 { - // accept empty checksum for compatibility with old file backend - return nil - } - ck := cp.Checksum - cp.Checksum = 0 - err := ck.Verify(cp) - cp.Checksum = ck - return err -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go deleted file mode 100644 index 352fddfb9cd..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
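
The marshal/verify dance above follows a common checkpoint pattern: the checksum field must be zeroed before hashing so it never covers itself. A minimal standalone sketch, using plain FNV-32a over the struct's fmt dump rather than the kubelet's checksum package:

package main

import (
	"encoding/json"
	"fmt"
	"hash/fnv"
)

type checkpoint struct {
	PolicyName    string `json:"policyName"`
	DefaultCPUSet string `json:"defaultCpuSet"`
	Checksum      uint32 `json:"checksum"`
}

func marshal(cp checkpoint) ([]byte, error) {
	cp.Checksum = 0 // the checksum must not cover itself
	h := fnv.New32a()
	fmt.Fprintf(h, "%v", cp)
	cp.Checksum = h.Sum32()
	return json.Marshal(cp)
}

func main() {
	b, _ := marshal(checkpoint{PolicyName: "static", DefaultCPUSet: "0-7"})
	fmt.Println(string(b))
}
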
-*/ - -package state - -import ( - "k8s.io/utils/cpuset" -) - -// ContainerCPUAssignments type used in cpu manager state -type ContainerCPUAssignments map[string]map[string]cpuset.CPUSet - -// Clone returns a copy of ContainerCPUAssignments -func (as ContainerCPUAssignments) Clone() ContainerCPUAssignments { - ret := make(ContainerCPUAssignments, len(as)) - for pod := range as { - ret[pod] = make(map[string]cpuset.CPUSet, len(as[pod])) - for container, cset := range as[pod] { - ret[pod][container] = cset - } - } - return ret -} - -// Reader interface used to read current cpu/pod assignment state -type Reader interface { - GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool) - GetDefaultCPUSet() cpuset.CPUSet - GetCPUSetOrDefault(podUID string, containerName string) cpuset.CPUSet - GetCPUAssignments() ContainerCPUAssignments -} - -type writer interface { - SetCPUSet(podUID string, containerName string, cpuset cpuset.CPUSet) - SetDefaultCPUSet(cpuset cpuset.CPUSet) - SetCPUAssignments(ContainerCPUAssignments) - Delete(podUID string, containerName string) - ClearState() -} - -// State interface provides methods for tracking and setting cpu/pod assignment -type State interface { - Reader - writer -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go deleted file mode 100644 index bda90ba1f4c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package state - -import ( - "fmt" - "path/filepath" - "sync" - - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/utils/cpuset" -) - -var _ State = &stateCheckpoint{} - -type stateCheckpoint struct { - mux sync.RWMutex - policyName string - cache State - checkpointManager checkpointmanager.CheckpointManager - checkpointName string - initialContainers containermap.ContainerMap -} - -// NewCheckpointState creates new State for keeping track of cpu/pod assignment with checkpoint backend -func NewCheckpointState(stateDir, checkpointName, policyName string, initialContainers containermap.ContainerMap) (State, error) { - checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir) - if err != nil { - return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err) - } - stateCheckpoint := &stateCheckpoint{ - cache: NewMemoryState(), - policyName: policyName, - checkpointManager: checkpointManager, - checkpointName: checkpointName, - initialContainers: initialContainers, - } - - if err := stateCheckpoint.restoreState(); err != nil { - //nolint:staticcheck // ST1005 user-facing error message - return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete the CPU manager checkpoint file %q before restarting Kubelet", - err, filepath.Join(stateDir, checkpointName)) - } - - return stateCheckpoint, nil -} - -// migrateV1CheckpointToV2Checkpoint() converts checkpoints from the v1 format to the v2 format -func (sc *stateCheckpoint) migrateV1CheckpointToV2Checkpoint(src *CPUManagerCheckpointV1, dst *CPUManagerCheckpointV2) error { - if src.PolicyName != "" { - dst.PolicyName = src.PolicyName - } - if src.DefaultCPUSet != "" { - dst.DefaultCPUSet = src.DefaultCPUSet - } - for containerID, cset := range src.Entries { - podUID, containerName, err := sc.initialContainers.GetContainerRef(containerID) - if err != nil { - return fmt.Errorf("containerID '%v' not found in initial containers list", containerID) - } - if dst.Entries == nil { - dst.Entries = make(map[string]map[string]string) - } - if _, exists := dst.Entries[podUID]; !exists { - dst.Entries[podUID] = make(map[string]string) - } - dst.Entries[podUID][containerName] = cset - } - return nil -} - -// restores state from a checkpoint and creates it if it doesn't exist -func (sc *stateCheckpoint) restoreState() error { - sc.mux.Lock() - defer sc.mux.Unlock() - var err error - - checkpointV1 := newCPUManagerCheckpointV1() - checkpointV2 := newCPUManagerCheckpointV2() - - if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpointV1); err != nil { - checkpointV1 = &CPUManagerCheckpointV1{} // reset it back to 0 - if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpointV2); err != nil { - if err == errors.ErrCheckpointNotFound { - return sc.storeState() - } - return err - } - } - - if err = sc.migrateV1CheckpointToV2Checkpoint(checkpointV1, checkpointV2); err != nil { - return fmt.Errorf("error migrating v1 checkpoint state to v2 checkpoint state: %s", err) - } - - if sc.policyName != checkpointV2.PolicyName { - return fmt.Errorf("configured policy %q differs from state checkpoint policy %q", sc.policyName, checkpointV2.PolicyName) - } - - var tmpDefaultCPUSet cpuset.CPUSet - if tmpDefaultCPUSet, err = cpuset.Parse(checkpointV2.DefaultCPUSet); err != nil { - return fmt.Errorf("could not parse default cpu set %q: %v", 
checkpointV2.DefaultCPUSet, err) - } - - var tmpContainerCPUSet cpuset.CPUSet - tmpAssignments := ContainerCPUAssignments{} - for pod := range checkpointV2.Entries { - tmpAssignments[pod] = make(map[string]cpuset.CPUSet, len(checkpointV2.Entries[pod])) - for container, cpuString := range checkpointV2.Entries[pod] { - if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil { - return fmt.Errorf("could not parse cpuset %q for container %q in pod %q: %v", cpuString, container, pod, err) - } - tmpAssignments[pod][container] = tmpContainerCPUSet - } - } - - sc.cache.SetDefaultCPUSet(tmpDefaultCPUSet) - sc.cache.SetCPUAssignments(tmpAssignments) - - klog.V(2).InfoS("State checkpoint: restored state from checkpoint") - klog.V(2).InfoS("State checkpoint: defaultCPUSet", "defaultCpuSet", tmpDefaultCPUSet.String()) - - return nil -} - -// saves state to a checkpoint, caller is responsible for locking -func (sc *stateCheckpoint) storeState() error { - checkpoint := NewCPUManagerCheckpoint() - checkpoint.PolicyName = sc.policyName - checkpoint.DefaultCPUSet = sc.cache.GetDefaultCPUSet().String() - - assignments := sc.cache.GetCPUAssignments() - for pod := range assignments { - checkpoint.Entries[pod] = make(map[string]string, len(assignments[pod])) - for container, cset := range assignments[pod] { - checkpoint.Entries[pod][container] = cset.String() - } - } - - err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint) - if err != nil { - klog.ErrorS(err, "Failed to save checkpoint") - return err - } - return nil -} - -// GetCPUSet returns current CPU set -func (sc *stateCheckpoint) GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool) { - sc.mux.RLock() - defer sc.mux.RUnlock() - - res, ok := sc.cache.GetCPUSet(podUID, containerName) - return res, ok -} - -// GetDefaultCPUSet returns default CPU set -func (sc *stateCheckpoint) GetDefaultCPUSet() cpuset.CPUSet { - sc.mux.RLock() - defer sc.mux.RUnlock() - - return sc.cache.GetDefaultCPUSet() -} - -// GetCPUSetOrDefault returns current CPU set, or default one if it wasn't changed -func (sc *stateCheckpoint) GetCPUSetOrDefault(podUID string, containerName string) cpuset.CPUSet { - sc.mux.RLock() - defer sc.mux.RUnlock() - - return sc.cache.GetCPUSetOrDefault(podUID, containerName) -} - -// GetCPUAssignments returns current CPU to pod assignments -func (sc *stateCheckpoint) GetCPUAssignments() ContainerCPUAssignments { - sc.mux.RLock() - defer sc.mux.RUnlock() - - return sc.cache.GetCPUAssignments() -} - -// SetCPUSet sets CPU set -func (sc *stateCheckpoint) SetCPUSet(podUID string, containerName string, cset cpuset.CPUSet) { - sc.mux.Lock() - defer sc.mux.Unlock() - sc.cache.SetCPUSet(podUID, containerName, cset) - err := sc.storeState() - if err != nil { - klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName) - } -} - -// SetDefaultCPUSet sets default CPU set -func (sc *stateCheckpoint) SetDefaultCPUSet(cset cpuset.CPUSet) { - sc.mux.Lock() - defer sc.mux.Unlock() - sc.cache.SetDefaultCPUSet(cset) - err := sc.storeState() - if err != nil { - klog.ErrorS(err, "Failed to store state to checkpoint") - } -} - -// SetCPUAssignments sets CPU to pod assignments -func (sc *stateCheckpoint) SetCPUAssignments(a ContainerCPUAssignments) { - sc.mux.Lock() - defer sc.mux.Unlock() - sc.cache.SetCPUAssignments(a) - err := sc.storeState() - if err != nil { - klog.ErrorS(err, "Failed to store state to checkpoint") - } -} - -// Delete deletes assignment for specified pod -func 
(sc *stateCheckpoint) Delete(podUID string, containerName string) { - sc.mux.Lock() - defer sc.mux.Unlock() - sc.cache.Delete(podUID, containerName) - err := sc.storeState() - if err != nil { - klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName) - } -} - -// ClearState clears the state and saves it in a checkpoint -func (sc *stateCheckpoint) ClearState() { - sc.mux.Lock() - defer sc.mux.Unlock() - sc.cache.ClearState() - err := sc.storeState() - if err != nil { - klog.ErrorS(err, "Failed to store state to checkpoint") - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go deleted file mode 100644 index cb01ea92609..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "sync" - - "k8s.io/klog/v2" - "k8s.io/utils/cpuset" -) - -type stateMemory struct { - sync.RWMutex - assignments ContainerCPUAssignments - defaultCPUSet cpuset.CPUSet -} - -var _ State = &stateMemory{} - -// NewMemoryState creates new State for keeping track of cpu/pod assignment -func NewMemoryState() State { - klog.InfoS("Initialized new in-memory state store") - return &stateMemory{ - assignments: ContainerCPUAssignments{}, - defaultCPUSet: cpuset.New(), - } -} - -func (s *stateMemory) GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool) { - s.RLock() - defer s.RUnlock() - - res, ok := s.assignments[podUID][containerName] - return res.Clone(), ok -} - -func (s *stateMemory) GetDefaultCPUSet() cpuset.CPUSet { - s.RLock() - defer s.RUnlock() - - return s.defaultCPUSet.Clone() -} - -func (s *stateMemory) GetCPUSetOrDefault(podUID string, containerName string) cpuset.CPUSet { - if res, ok := s.GetCPUSet(podUID, containerName); ok { - return res - } - return s.GetDefaultCPUSet() -} - -func (s *stateMemory) GetCPUAssignments() ContainerCPUAssignments { - s.RLock() - defer s.RUnlock() - return s.assignments.Clone() -} - -func (s *stateMemory) SetCPUSet(podUID string, containerName string, cset cpuset.CPUSet) { - s.Lock() - defer s.Unlock() - - if _, ok := s.assignments[podUID]; !ok { - s.assignments[podUID] = make(map[string]cpuset.CPUSet) - } - - s.assignments[podUID][containerName] = cset - klog.InfoS("Updated desired CPUSet", "podUID", podUID, "containerName", containerName, "cpuSet", cset) -} - -func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) { - s.Lock() - defer s.Unlock() - - s.defaultCPUSet = cset - klog.InfoS("Updated default CPUSet", "cpuSet", cset) -} - -func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) { - s.Lock() - defer s.Unlock() - - s.assignments = a.Clone() - klog.InfoS("Updated CPUSet assignments", "assignments", a) -} - -func (s *stateMemory) Delete(podUID string, containerName string) { - s.Lock() - defer s.Unlock() - - 
delete(s.assignments[podUID], containerName)
-	if len(s.assignments[podUID]) == 0 {
-		delete(s.assignments, podUID)
-	}
-	klog.V(2).InfoS("Deleted CPUSet assignment", "podUID", podUID, "containerName", containerName)
-}
-
-func (s *stateMemory) ClearState() {
-	s.Lock()
-	defer s.Unlock()
-
-	s.defaultCPUSet = cpuset.CPUSet{}
-	s.assignments = make(ContainerCPUAssignments)
-	klog.V(2).InfoS("Cleared state")
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/alignment.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/alignment.go
deleted file mode 100644
index 4f3dfe29762..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/alignment.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2025 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topology
-
-import (
-	"fmt"
-
-	"k8s.io/utils/cpuset"
-)
-
-// Alignment is metadata about a cpuset allocation
-type Alignment struct {
-	// UncoreCache is true if all the CPUs are uncore-cache aligned,
-	// IOW if they all share the same Uncore cache block.
-	// If the allocated CPU count is greater than an Uncore Group size,
-	// CPUs can't be uncore-aligned; otherwise, they are.
-	// This flag tracks alignment, not interference or lack thereof.
-	UncoreCache bool
-}
-
-func (ca Alignment) String() string {
-	return fmt.Sprintf("aligned=%v", ca.UncoreCache)
-}
-
-// Allocation represents a CPU set plus alignment metadata
-type Allocation struct {
-	CPUs    cpuset.CPUSet
-	Aligned Alignment
-}
-
-func (ca Allocation) String() string {
-	return ca.CPUs.String() + " " + ca.Aligned.String()
-}
-
-// EmptyAllocation returns a new zero-valued CPU allocation. Please note that
-// an empty cpuset is aligned according to every possible way we can consider
-func EmptyAllocation() Allocation {
-	return Allocation{
-		CPUs: cpuset.New(),
-		Aligned: Alignment{
-			UncoreCache: true,
-		},
-	}
-}
-
-func isAlignedAtUncoreCache(topo *CPUTopology, cpuList ...int) bool {
-	if len(cpuList) <= 1 {
-		return true
-	}
-	reference, ok := topo.CPUDetails[cpuList[0]]
-	if !ok {
-		return false
-	}
-	for _, cpu := range cpuList[1:] {
-		info, ok := topo.CPUDetails[cpu]
-		if !ok {
-			return false
-		}
-		if info.UncoreCacheID != reference.UncoreCacheID {
-			return false
-		}
-	}
-	return true
-}
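
The alignment rule above is easy to misread: an allocation counts as uncore-aligned only when every CPU shares the first CPU's uncore-cache ID. A minimal self-contained sketch of the same rule, assuming a hypothetical details map (cpuInfo and alignedAtUncore are illustrative stand-ins, not part of the vendored file):

package main

import "fmt"

type cpuInfo struct{ UncoreCacheID int }

// alignedAtUncore mirrors isAlignedAtUncoreCache: true for empty or
// single-CPU sets, otherwise true only if all CPUs share one cache block.
func alignedAtUncore(details map[int]cpuInfo, cpus ...int) bool {
	if len(cpus) <= 1 {
		return true
	}
	ref, ok := details[cpus[0]]
	if !ok {
		return false
	}
	for _, cpu := range cpus[1:] {
		info, ok := details[cpu]
		if !ok || info.UncoreCacheID != ref.UncoreCacheID {
			return false
		}
	}
	return true
}

func main() {
	// Assume CPUs 0-3 sit on uncore cache 0 and CPUs 4-7 on uncore cache 1.
	details := map[int]cpuInfo{}
	for cpu := 0; cpu < 8; cpu++ {
		details[cpu] = cpuInfo{UncoreCacheID: cpu / 4}
	}
	fmt.Println(alignedAtUncore(details, 0, 1, 2))    // true: one cache block
	fmt.Println(alignedAtUncore(details, 2, 3, 4, 5)) // false: spans two blocks
}

diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/doc.go
deleted file mode 100644
index 8a22c5db7e6..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.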
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package topology contains helpers for the CPU manager.
-package topology
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go
deleted file mode 100644
index 36291e6f5af..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topology
-
-import (
-	"fmt"
-
-	cadvisorapi "github.com/google/cadvisor/info/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/utils/cpuset"
-)
-
-// NUMANodeInfo is a map from NUMANode ID to a list of CPU IDs associated with
-// that NUMANode.
-type NUMANodeInfo map[int]cpuset.CPUSet
-
-// CPUDetails is a map from CPU ID to Core ID, Socket ID, and NUMA ID.
-type CPUDetails map[int]CPUInfo
-
-// CPUTopology contains details of node cpu, where:
-// CPU - logical CPU, cadvisor - thread
-// Core - physical CPU, cadvisor - Core
-// Socket - socket, cadvisor - Socket
-// NUMA Node - NUMA cell, cadvisor - Node
-// UncoreCache - Split L3 Cache Topology, cadvisor
-type CPUTopology struct {
-	NumCPUs        int
-	NumCores       int
-	NumUncoreCache int
-	NumSockets     int
-	NumNUMANodes   int
-	CPUDetails     CPUDetails
-}
-
-// CPUsPerCore returns the number of logical CPUs associated with
-// each core.
-func (topo *CPUTopology) CPUsPerCore() int {
-	if topo.NumCores == 0 {
-		return 0
-	}
-	return topo.NumCPUs / topo.NumCores
-}
-
-// CPUsPerSocket returns the number of logical CPUs associated with
-// each socket.
-func (topo *CPUTopology) CPUsPerSocket() int {
-	if topo.NumSockets == 0 {
-		return 0
-	}
-	return topo.NumCPUs / topo.NumSockets
-}
-
-// CPUsPerUncore returns the number of logical CPUs that are associated with
-// each UncoreCache
-func (topo *CPUTopology) CPUsPerUncore() int {
-	if topo.NumUncoreCache == 0 {
-		return 0
-	}
-	return topo.NumCPUs / topo.NumUncoreCache
-}
-
-// CPUCoreID returns the physical core ID which the given logical CPU
-// belongs to.
-func (topo *CPUTopology) CPUCoreID(cpu int) (int, error) {
-	info, ok := topo.CPUDetails[cpu]
-	if !ok {
-		return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
-	}
-	return info.CoreID, nil
-}
-
-// CPUSocketID returns the socket ID which the given logical CPU belongs to.
-func (topo *CPUTopology) CPUSocketID(cpu int) (int, error) {
-	info, ok := topo.CPUDetails[cpu]
-	if !ok {
-		return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
-	}
-	return info.SocketID, nil
-}
-
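To make the CPUDetails mapping and the derived counters above concrete, a small sketch with a hypothetical 2-socket, 4-core, 8-CPU machine (the info type and the loop are illustrative stand-ins for CPUInfo and the per-socket query helpers defined further down, such as CPUsInSockets):

package main

import "fmt"

// info is a pared-down stand-in for the vendored CPUInfo type.
type info struct{ CoreID, SocketID, NUMANodeID int }

func main() {
	// Logical CPUs n and n+4 are sibling threads on the same physical core;
	// cores 0-1 sit on socket 0, cores 2-3 on socket 1.
	details := map[int]info{}
	for cpu := 0; cpu < 8; cpu++ {
		details[cpu] = info{CoreID: cpu % 4, SocketID: (cpu % 4) / 2, NUMANodeID: 0}
	}

	// Mirror of CPUsInSockets: collect the logical CPUs of socket 0.
	var inSocket0 []int
	for cpu, i := range details {
		if i.SocketID == 0 {
			inSocket0 = append(inSocket0, cpu)
		}
	}
	fmt.Println(len(inSocket0)) // 4 logical CPUs per socket

	// The derived ratio from CPUTopology: 8 CPUs / 4 cores = 2 threads per core.
	numCPUs, numCores := len(details), 4
	fmt.Println(numCPUs / numCores) // CPUsPerCore() == 2
}

-// CPUNUMANodeID returns the NUMA node ID which the given logical CPU belongs to.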
-func (topo *CPUTopology) CPUNUMANodeID(cpu int) (int, error) {
-	info, ok := topo.CPUDetails[cpu]
-	if !ok {
-		return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
-	}
-	return info.NUMANodeID, nil
-}
-
-// CheckAlignment returns alignment information for the given cpuset in
-// the context of the current CPU topology
-func (topo *CPUTopology) CheckAlignment(cpus cpuset.CPUSet) Alignment {
-	cpuList := cpus.UnsortedList()
-	return Alignment{
-		UncoreCache: isAlignedAtUncoreCache(topo, cpuList...),
-	}
-}
-
-// CPUInfo contains the NUMA, socket, UncoreCache and core IDs associated with a CPU.
-type CPUInfo struct {
-	NUMANodeID    int
-	SocketID      int
-	CoreID        int
-	UncoreCacheID int
-}
-
-// KeepOnly returns a new CPUDetails object with only the supplied cpus.
-func (d CPUDetails) KeepOnly(cpus cpuset.CPUSet) CPUDetails {
-	result := CPUDetails{}
-	for cpu, info := range d {
-		if cpus.Contains(cpu) {
-			result[cpu] = info
-		}
-	}
-	return result
-}
-
-// UncoreCaches returns all the uncore cache IDs (L3 index) associated with the CPUs in this CPUDetails
-func (d CPUDetails) UncoreCaches() cpuset.CPUSet {
-	var numUnCoreIDs []int
-	for _, info := range d {
-		numUnCoreIDs = append(numUnCoreIDs, info.UncoreCacheID)
-	}
-	return cpuset.New(numUnCoreIDs...)
-}
-
-// UncoreInNUMANodes returns all of the uncore IDs associated with the given
-// NUMANode IDs in this CPUDetails.
-func (d CPUDetails) UncoreInNUMANodes(ids ...int) cpuset.CPUSet {
-	var unCoreIDs []int
-	for _, id := range ids {
-		for _, info := range d {
-			if info.NUMANodeID == id {
-				unCoreIDs = append(unCoreIDs, info.UncoreCacheID)
-			}
-		}
-	}
-	return cpuset.New(unCoreIDs...)
-}
-
-// CoresNeededInUncoreCache returns either the full list of all available unique core IDs associated with the given
-// UnCoreCache IDs in this CPUDetails or a subset that matches the ask.
-func (d CPUDetails) CoresNeededInUncoreCache(numCoresNeeded int, ids ...int) cpuset.CPUSet {
-	coreIDs := d.coresInUncoreCache(ids...)
-	if coreIDs.Size() <= numCoresNeeded {
-		return coreIDs
-	}
-	tmpCoreIDs := coreIDs.List()
-	return cpuset.New(tmpCoreIDs[:numCoresNeeded]...)
-}
-
-// Helper function that just gets the cores
-func (d CPUDetails) coresInUncoreCache(ids ...int) cpuset.CPUSet {
-	var coreIDs []int
-	for _, id := range ids {
-		for _, info := range d {
-			if info.UncoreCacheID == id {
-				coreIDs = append(coreIDs, info.CoreID)
-			}
-		}
-	}
-	return cpuset.New(coreIDs...)
-}
-
-// CPUsInUncoreCaches returns all the logical CPU IDs associated with the given
-// UnCoreCache IDs in this CPUDetails
-func (d CPUDetails) CPUsInUncoreCaches(ids ...int) cpuset.CPUSet {
-	var cpuIDs []int
-	for _, id := range ids {
-		for cpu, info := range d {
-			if info.UncoreCacheID == id {
-				cpuIDs = append(cpuIDs, cpu)
-			}
-		}
-	}
-	return cpuset.New(cpuIDs...)
-}
-
-// NUMANodes returns all of the NUMANode IDs associated with the CPUs in this
-// CPUDetails.
-func (d CPUDetails) NUMANodes() cpuset.CPUSet {
-	var numaNodeIDs []int
-	for _, info := range d {
-		numaNodeIDs = append(numaNodeIDs, info.NUMANodeID)
-	}
-	return cpuset.New(numaNodeIDs...)
-}
-
-// NUMANodesInSockets returns all of the logical NUMANode IDs associated with
-// the given socket IDs in this CPUDetails.
-func (d CPUDetails) NUMANodesInSockets(ids ...int) cpuset.CPUSet {
-	var numaNodeIDs []int
-	for _, id := range ids {
-		for _, info := range d {
-			if info.SocketID == id {
-				numaNodeIDs = append(numaNodeIDs, info.NUMANodeID)
-			}
-		}
-	}
-	return cpuset.New(numaNodeIDs...)
-} - -// Sockets returns all of the socket IDs associated with the CPUs in this -// CPUDetails. -func (d CPUDetails) Sockets() cpuset.CPUSet { - var socketIDs []int - for _, info := range d { - socketIDs = append(socketIDs, info.SocketID) - } - return cpuset.New(socketIDs...) -} - -// CPUsInSockets returns all of the logical CPU IDs associated with the given -// socket IDs in this CPUDetails. -func (d CPUDetails) CPUsInSockets(ids ...int) cpuset.CPUSet { - var cpuIDs []int - for _, id := range ids { - for cpu, info := range d { - if info.SocketID == id { - cpuIDs = append(cpuIDs, cpu) - } - } - } - return cpuset.New(cpuIDs...) -} - -// SocketsInNUMANodes returns all of the logical Socket IDs associated with the -// given NUMANode IDs in this CPUDetails. -func (d CPUDetails) SocketsInNUMANodes(ids ...int) cpuset.CPUSet { - var socketIDs []int - for _, id := range ids { - for _, info := range d { - if info.NUMANodeID == id { - socketIDs = append(socketIDs, info.SocketID) - } - } - } - return cpuset.New(socketIDs...) -} - -// Cores returns all of the core IDs associated with the CPUs in this -// CPUDetails. -func (d CPUDetails) Cores() cpuset.CPUSet { - var coreIDs []int - for _, info := range d { - coreIDs = append(coreIDs, info.CoreID) - } - return cpuset.New(coreIDs...) -} - -// CoresInNUMANodes returns all of the core IDs associated with the given -// NUMANode IDs in this CPUDetails. -func (d CPUDetails) CoresInNUMANodes(ids ...int) cpuset.CPUSet { - var coreIDs []int - for _, id := range ids { - for _, info := range d { - if info.NUMANodeID == id { - coreIDs = append(coreIDs, info.CoreID) - } - } - } - return cpuset.New(coreIDs...) -} - -// CoresInSockets returns all of the core IDs associated with the given socket -// IDs in this CPUDetails. -func (d CPUDetails) CoresInSockets(ids ...int) cpuset.CPUSet { - var coreIDs []int - for _, id := range ids { - for _, info := range d { - if info.SocketID == id { - coreIDs = append(coreIDs, info.CoreID) - } - } - } - return cpuset.New(coreIDs...) -} - -// CPUs returns all of the logical CPU IDs in this CPUDetails. -func (d CPUDetails) CPUs() cpuset.CPUSet { - var cpuIDs []int - for cpuID := range d { - cpuIDs = append(cpuIDs, cpuID) - } - return cpuset.New(cpuIDs...) -} - -// CPUsInNUMANodes returns all of the logical CPU IDs associated with the given -// NUMANode IDs in this CPUDetails. -func (d CPUDetails) CPUsInNUMANodes(ids ...int) cpuset.CPUSet { - var cpuIDs []int - for _, id := range ids { - for cpu, info := range d { - if info.NUMANodeID == id { - cpuIDs = append(cpuIDs, cpu) - } - } - } - return cpuset.New(cpuIDs...) -} - -// CPUsInCores returns all of the logical CPU IDs associated with the given -// core IDs in this CPUDetails. -func (d CPUDetails) CPUsInCores(ids ...int) cpuset.CPUSet { - var cpuIDs []int - for _, id := range ids { - for cpu, info := range d { - if info.CoreID == id { - cpuIDs = append(cpuIDs, cpu) - } - } - } - return cpuset.New(cpuIDs...) 
-}
-
-func getUncoreCacheID(core cadvisorapi.Core) int {
-	if len(core.UncoreCaches) < 1 {
-		// In case cAdvisor reports no uncore caches, fall back to socket alignment since the uncore cache is not split
-		return core.SocketID
-	}
-	// Even though the cadvisor API returns a slice, we only expect either 0 or 1 uncore caches,
-	// so everything past the first entry should be ignored
-	return core.UncoreCaches[0].Id
-}
-
-// Discover returns CPUTopology based on cadvisor node info
-func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) {
-	if machineInfo.NumCores == 0 {
-		return nil, fmt.Errorf("could not detect number of cpus")
-	}
-
-	CPUDetails := CPUDetails{}
-	numPhysicalCores := 0
-
-	for _, node := range machineInfo.Topology {
-		numPhysicalCores += len(node.Cores)
-		for _, core := range node.Cores {
-			if coreID, err := getUniqueCoreID(core.Threads); err == nil {
-				for _, cpu := range core.Threads {
-					CPUDetails[cpu] = CPUInfo{
-						CoreID:        coreID,
-						SocketID:      core.SocketID,
-						NUMANodeID:    node.Id,
-						UncoreCacheID: getUncoreCacheID(core),
-					}
-				}
-			} else {
-				klog.ErrorS(nil, "Could not get unique coreID for socket", "socket", core.SocketID, "core", core.Id, "threads", core.Threads)
-				return nil, err
-			}
-		}
-	}
-
-	return &CPUTopology{
-		NumCPUs:        machineInfo.NumCores,
-		NumSockets:     machineInfo.NumSockets,
-		NumCores:       numPhysicalCores,
-		NumNUMANodes:   CPUDetails.NUMANodes().Size(),
-		NumUncoreCache: CPUDetails.UncoreCaches().Size(),
-		CPUDetails:     CPUDetails,
-	}, nil
-}
-
-// getUniqueCoreID computes coreID as the lowest cpuID
-// for a given Threads []int slice. This assures that coreIDs are
-// platform unique (opposite to what cAdvisor reports)
-func getUniqueCoreID(threads []int) (coreID int, err error) {
-	if len(threads) == 0 {
-		return 0, fmt.Errorf("no cpus provided")
-	}
-
-	if len(threads) != cpuset.New(threads...).Size() {
-		return 0, fmt.Errorf("cpus provided are not unique")
-	}
-
-	min := threads[0]
-	for _, thread := range threads[1:] {
-		if thread < min {
-			min = thread
-		}
-	}
-
-	return min, nil
-}
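
Why the lowest thread ID? cAdvisor numbers cores per socket, so two different physical cores can both be reported as "core 0"; the lowest logical CPU ID on a core is globally unique. A standalone sketch of that idea (uniqueCoreID and the example thread lists are illustrative, not part of the vendored file):

package main

import "fmt"

// uniqueCoreID mirrors getUniqueCoreID above: the platform-unique ID of a
// physical core is taken to be the lowest logical CPU (thread) ID on it.
func uniqueCoreID(threads []int) (int, error) {
	if len(threads) == 0 {
		return 0, fmt.Errorf("no cpus provided")
	}
	seen := map[int]bool{}
	for _, t := range threads {
		if seen[t] {
			return 0, fmt.Errorf("cpus provided are not unique")
		}
		seen[t] = true
	}
	min := threads[0]
	for _, t := range threads[1:] {
		if t < min {
			min = t
		}
	}
	return min, nil
}

func main() {
	// cAdvisor may call both of these "core 0"; thread IDs stay unique.
	socket0Core0 := []int{0, 8}
	socket1Core0 := []int{4, 12}
	id0, _ := uniqueCoreID(socket0Core0)
	id1, _ := uniqueCoreID(socket1Core0)
	fmt.Println(id0, id1) // 0 4 - distinct platform-wide core IDs
}

diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/OWNERS
deleted file mode 100644
index 5c8da238f8e..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers: []
-reviewers:
-  - klueska
-emeritus_approvers:
-  - vishh
-  - jiayingz
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go
deleted file mode 100644
index b9dcdaf829d..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.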
-*/ - -package checkpoint - -import ( - "encoding/json" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum" -) - -// DeviceManagerCheckpoint defines the operations to retrieve pod devices -type DeviceManagerCheckpoint interface { - checkpointmanager.Checkpoint - GetData() ([]PodDevicesEntry, map[string][]string) -} - -// DevicesPerNUMA represents device ids obtained from device plugin per NUMA node id -type DevicesPerNUMA map[int64][]string - -// PodDevicesEntry connects pod information to devices -type PodDevicesEntry struct { - PodUID string - ContainerName string - ResourceName string - DeviceIDs DevicesPerNUMA - AllocResp []byte -} - -// checkpointData struct is used to store pod to device allocation information -// in a checkpoint file. -// TODO: add version control when we need to change checkpoint format. -type checkpointData struct { - PodDeviceEntries []PodDevicesEntry - RegisteredDevices map[string][]string -} - -// Data holds checkpoint data and its checksum -type Data struct { - Data checkpointData - Checksum checksum.Checksum -} - -// NewDevicesPerNUMA is a function that creates DevicesPerNUMA map -func NewDevicesPerNUMA() DevicesPerNUMA { - return make(DevicesPerNUMA) -} - -// Devices is a function that returns all device ids for all NUMA nodes -// and represent it as sets.Set[string] -func (dev DevicesPerNUMA) Devices() sets.Set[string] { - result := sets.New[string]() - - for _, devs := range dev { - result.Insert(devs...) - } - return result -} - -// New returns an instance of Checkpoint - must be an alias for the most recent version -func New(devEntries []PodDevicesEntry, devices map[string][]string) DeviceManagerCheckpoint { - return newV2(devEntries, devices) -} - -func newV2(devEntries []PodDevicesEntry, devices map[string][]string) DeviceManagerCheckpoint { - return &Data{ - Data: checkpointData{ - PodDeviceEntries: devEntries, - RegisteredDevices: devices, - }, - } -} - -// MarshalCheckpoint returns marshalled data -func (cp *Data) MarshalCheckpoint() ([]byte, error) { - cp.Checksum = checksum.New(cp.Data) - return json.Marshal(*cp) -} - -// UnmarshalCheckpoint returns unmarshalled data -func (cp *Data) UnmarshalCheckpoint(blob []byte) error { - return json.Unmarshal(blob, cp) -} - -// VerifyChecksum verifies that passed checksum is same as calculated checksum -func (cp *Data) VerifyChecksum() error { - return cp.Checksum.Verify(cp.Data) -} - -// GetData returns device entries and registered devices in the *most recent* -// checkpoint format, *not* in the original format stored on disk. -func (cp *Data) GetData() ([]PodDevicesEntry, map[string][]string) { - return cp.Data.PodDeviceEntries, cp.Data.RegisteredDevices -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go deleted file mode 100644 index a9f7e7bed90..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package devicemanager - -import ( - "context" - "fmt" - "sync" - "time" - - pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" - plugin "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1" -) - -// endpoint maps to a single registered device plugin. It is responsible -// for managing gRPC communications with the device plugin and caching -// device states reported by the device plugin. -type endpoint interface { - getPreferredAllocation(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error) - allocate(devs []string) (*pluginapi.AllocateResponse, error) - preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error) - setStopTime(t time.Time) - isStopped() bool - stopGracePeriodExpired() bool -} - -type endpointImpl struct { - mutex sync.Mutex - resourceName string - api pluginapi.DevicePluginClient - stopTime time.Time - client plugin.Client // for testing only -} - -// newEndpointImpl creates a new endpoint for the given resourceName. -// This is to be used during normal device plugin registration. -func newEndpointImpl(p plugin.DevicePlugin) *endpointImpl { - return &endpointImpl{ - api: p.API(), - resourceName: p.Resource(), - } -} - -// newStoppedEndpointImpl creates a new endpoint for the given resourceName with stopTime set. -// This is to be used during Kubelet restart, before the actual device plugin re-registers. -func newStoppedEndpointImpl(resourceName string) *endpointImpl { - return &endpointImpl{ - resourceName: resourceName, - stopTime: time.Now(), - } -} - -func (e *endpointImpl) isStopped() bool { - e.mutex.Lock() - defer e.mutex.Unlock() - return !e.stopTime.IsZero() -} - -func (e *endpointImpl) stopGracePeriodExpired() bool { - e.mutex.Lock() - defer e.mutex.Unlock() - return !e.stopTime.IsZero() && time.Since(e.stopTime) > endpointStopGracePeriod -} - -func (e *endpointImpl) setStopTime(t time.Time) { - e.mutex.Lock() - defer e.mutex.Unlock() - e.stopTime = t -} - -// getPreferredAllocation issues GetPreferredAllocation gRPC call to the device plugin. -func (e *endpointImpl) getPreferredAllocation(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error) { - if e.isStopped() { - return nil, fmt.Errorf(errEndpointStopped, e) - } - return e.api.GetPreferredAllocation(context.Background(), &pluginapi.PreferredAllocationRequest{ - ContainerRequests: []*pluginapi.ContainerPreferredAllocationRequest{ - { - AvailableDeviceIDs: available, - MustIncludeDeviceIDs: mustInclude, - AllocationSize: int32(size), - }, - }, - }) -} - -// allocate issues Allocate gRPC call to the device plugin. -func (e *endpointImpl) allocate(devs []string) (*pluginapi.AllocateResponse, error) { - if e.isStopped() { - return nil, fmt.Errorf(errEndpointStopped, e) - } - return e.api.Allocate(context.Background(), &pluginapi.AllocateRequest{ - ContainerRequests: []*pluginapi.ContainerAllocateRequest{ - {DevicesIDs: devs}, - }, - }) -} - -// preStartContainer issues PreStartContainer gRPC call to the device plugin. 
-func (e *endpointImpl) preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error) { - if e.isStopped() { - return nil, fmt.Errorf(errEndpointStopped, e) - } - ctx, cancel := context.WithTimeout(context.Background(), pluginapi.KubeletPreStartContainerRPCTimeoutInSecs*time.Second) - defer cancel() - return e.api.PreStartContainer(ctx, &pluginapi.PreStartContainerRequest{ - DevicesIDs: devs, - }) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go deleted file mode 100644 index 9ed9c132293..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go +++ /dev/null @@ -1,1185 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package devicemanager - -import ( - "context" - goerrors "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sort" - "sync" - "time" - - cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/klog/v2" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - errorsutil "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/server/healthz" - utilfeature "k8s.io/apiserver/pkg/util/feature" - pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" - plugin "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1" - "k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/config" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" - "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" -) - -const nodeWithoutTopology = -1 - -// ActivePodsFunc is a function that returns a list of pods to reconcile. -type ActivePodsFunc func() []*v1.Pod - -// ManagerImpl is the structure in charge of managing Device Plugins. -type ManagerImpl struct { - checkpointdir string - - endpoints map[string]endpointInfo // Key is ResourceName - mutex sync.Mutex - - server plugin.Server - - // activePods is a method for listing active pods on the node - // so the amount of pluginResources requested by existing pods - // could be counted when updating allocated devices - activePods ActivePodsFunc - - // sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. - // We use it to determine when we can purge inactive pods from checkpointed state. 
- sourcesReady config.SourcesReady - - // allDevices holds all the devices currently registered to the device manager - allDevices ResourceDeviceInstances - - // healthyDevices contains all the registered healthy resourceNames and their exported device IDs. - healthyDevices map[string]sets.Set[string] - - // unhealthyDevices contains all the unhealthy devices and their exported device IDs. - unhealthyDevices map[string]sets.Set[string] - - // allocatedDevices contains allocated deviceIds, keyed by resourceName. - allocatedDevices map[string]sets.Set[string] - - // podDevices contains pod to allocated device mapping. - podDevices *podDevices - checkpointManager checkpointmanager.CheckpointManager - - // List of NUMA Nodes available on the underlying machine - numaNodes []int - - // Store of Topology Affinities that the Device Manager can query. - topologyAffinityStore topologymanager.Store - - // devicesToReuse contains devices that can be reused as they have been allocated to - // init containers. - devicesToReuse PodReusableDevices - - // containerMap provides a mapping from (pod, container) -> containerID - // for all containers in a pod. Used to detect pods running across a restart - containerMap containermap.ContainerMap - - // containerRunningSet identifies which container among those present in `containerMap` - // was reported running by the container runtime when `containerMap` was computed. - // Used to detect pods running across a restart - containerRunningSet sets.Set[string] - - // update channel for device health updates - update chan resourceupdates.Update -} - -type endpointInfo struct { - e endpoint - opts *pluginapi.DevicePluginOptions -} - -type sourcesReadyStub struct{} - -// PodReusableDevices is a map by pod name of devices to reuse. -type PodReusableDevices map[string]map[string]sets.Set[string] - -func (s *sourcesReadyStub) AddSource(source string) {} -func (s *sourcesReadyStub) AllReady() bool { return true } - -// NewManagerImpl creates a new manager. -func NewManagerImpl(topology []cadvisorapi.Node, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) { - socketPath := pluginapi.KubeletSocket - if runtime.GOOS == "windows" { - socketPath = os.Getenv("SYSTEMDRIVE") + pluginapi.KubeletSocketWindows - } - return newManagerImpl(socketPath, topology, topologyAffinityStore) -} - -func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) { - klog.V(2).InfoS("Creating Device Plugin manager", "path", socketPath) - - var numaNodes []int - for _, node := range topology { - numaNodes = append(numaNodes, node.Id) - } - - manager := &ManagerImpl{ - endpoints: make(map[string]endpointInfo), - - allDevices: NewResourceDeviceInstances(), - healthyDevices: make(map[string]sets.Set[string]), - unhealthyDevices: make(map[string]sets.Set[string]), - allocatedDevices: make(map[string]sets.Set[string]), - podDevices: newPodDevices(), - numaNodes: numaNodes, - topologyAffinityStore: topologyAffinityStore, - devicesToReuse: make(PodReusableDevices), - update: make(chan resourceupdates.Update, 100), - } - - server, err := plugin.NewServer(socketPath, manager, manager) - if err != nil { - return nil, fmt.Errorf("failed to create plugin server: %v", err) - } - - manager.server = server - manager.checkpointdir, _ = filepath.Split(server.SocketPath()) - - // The following structures are populated with real implementations in manager.Start() - // Before that, initializes them to perform no-op operations. 
-	manager.activePods = func() []*v1.Pod { return []*v1.Pod{} }
-	manager.sourcesReady = &sourcesReadyStub{}
-	checkpointManager, err := checkpointmanager.NewCheckpointManager(manager.checkpointdir)
-	if err != nil {
-		return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
-	}
-	manager.checkpointManager = checkpointManager
-
-	return manager, nil
-}
-
-func (m *ManagerImpl) Updates() <-chan resourceupdates.Update {
-	return m.update
-}
-
-// CleanupPluginDirectory removes all existing unix sockets
-// from /var/lib/kubelet/device-plugins on Device Plugin Manager start
-func (m *ManagerImpl) CleanupPluginDirectory(dir string) error {
-	d, err := os.Open(dir)
-	if err != nil {
-		return err
-	}
-	defer d.Close()
-	names, err := d.Readdirnames(-1)
-	if err != nil {
-		return err
-	}
-	var errs []error
-	for _, name := range names {
-		filePath := filepath.Join(dir, name)
-		if filePath == m.checkpointFile() {
-			continue
-		}
-		stat, err := os.Stat(filePath)
-		if err != nil {
-			klog.ErrorS(err, "Failed to stat file", "path", filePath)
-			continue
-		}
-		if stat.IsDir() || stat.Mode()&os.ModeSocket == 0 {
-			continue
-		}
-		err = os.RemoveAll(filePath)
-		if err != nil {
-			errs = append(errs, err)
-			klog.ErrorS(err, "Failed to remove file", "path", filePath)
-			continue
-		}
-	}
-	return errorsutil.NewAggregate(errs)
-}
-
-// PluginConnected connects a plugin to a new endpoint.
-// This is done as part of device plugin registration.
-func (m *ManagerImpl) PluginConnected(resourceName string, p plugin.DevicePlugin) error {
-	options, err := p.API().GetDevicePluginOptions(context.Background(), &pluginapi.Empty{})
-	if err != nil {
-		return fmt.Errorf("failed to get device plugin options: %v", err)
-	}
-
-	e := newEndpointImpl(p)
-
-	m.mutex.Lock()
-	defer m.mutex.Unlock()
-	m.endpoints[resourceName] = endpointInfo{e, options}
-
-	klog.V(2).InfoS("Device plugin connected", "resourceName", resourceName)
-	return nil
-}
-
-// PluginDisconnected disconnects a plugin from an endpoint.
-// This is done as part of device plugin deregistration.
-func (m *ManagerImpl) PluginDisconnected(resourceName string) {
-	m.mutex.Lock()
-	defer m.mutex.Unlock()
-
-	if ep, exists := m.endpoints[resourceName]; exists {
-		m.markResourceUnhealthy(resourceName)
-		klog.V(2).InfoS("Endpoint became unhealthy", "resourceName", resourceName, "endpoint", ep)
-
-		ep.e.setStopTime(time.Now())
-	}
-}
-
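The receiver below feeds genericDeviceUpdateCallback, which rebuilds the healthy/unhealthy sets wholesale from each ListAndWatch report: a device that stops appearing simply drops out of both sets. A minimal sketch of that bookkeeping, assuming a hypothetical pared-down device type (splitByHealth is an illustrative stand-in, not part of the vendored file):

package main

import "fmt"

// device is a pared-down stand-in for pluginapi.Device.
type device struct {
	ID     string
	Health string // "Healthy" or "Unhealthy"
}

// splitByHealth mirrors the per-resource bookkeeping in
// genericDeviceUpdateCallback: each report replaces the cached sets.
func splitByHealth(devices []device) (healthy, unhealthy map[string]bool) {
	healthy, unhealthy = map[string]bool{}, map[string]bool{}
	for _, d := range devices {
		if d.Health == "Healthy" {
			healthy[d.ID] = true
		} else {
			unhealthy[d.ID] = true
		}
	}
	return healthy, unhealthy
}

func main() {
	h, u := splitByHealth([]device{
		{ID: "gpu-0", Health: "Healthy"},
		{ID: "gpu-1", Health: "Unhealthy"},
	})
	fmt.Println(len(h), len(u)) // 1 1
}

-// PluginListAndWatchReceiver receives ListAndWatchResponse from a device plugin
-// and ensures that an up-to-date state (e.g. number of devices and device health)
-// is captured. Also, registered device and device to container allocation
-// information is checkpointed to the disk.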
-func (m *ManagerImpl) PluginListAndWatchReceiver(resourceName string, resp *pluginapi.ListAndWatchResponse) { - var devices []pluginapi.Device - for _, d := range resp.Devices { - devices = append(devices, *d) - } - m.genericDeviceUpdateCallback(resourceName, devices) -} - -func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices []pluginapi.Device) { - healthyCount := 0 - m.mutex.Lock() - m.healthyDevices[resourceName] = sets.New[string]() - m.unhealthyDevices[resourceName] = sets.New[string]() - oldDevices := m.allDevices[resourceName] - podsToUpdate := sets.New[string]() - m.allDevices[resourceName] = make(map[string]pluginapi.Device) - for _, dev := range devices { - - if utilfeature.DefaultFeatureGate.Enabled(features.ResourceHealthStatus) { - // compare with old device's health and send update to the channel if needed - updatePodUIDFn := func(deviceID string) { - podUID, _ := m.podDevices.getPodAndContainerForDevice(deviceID) - if podUID != "" { - podsToUpdate.Insert(podUID) - } - } - if oldDev, ok := oldDevices[dev.ID]; ok { - if oldDev.Health != dev.Health { - updatePodUIDFn(dev.ID) - } - } else { - // if this is a new device, it might have existed before and disappeared for a while - // but still be assigned to a Pod. In this case, we need to send an update to the channel - updatePodUIDFn(dev.ID) - } - } - - m.allDevices[resourceName][dev.ID] = dev - if dev.Health == pluginapi.Healthy { - m.healthyDevices[resourceName].Insert(dev.ID) - healthyCount++ - } else { - m.unhealthyDevices[resourceName].Insert(dev.ID) - } - } - m.mutex.Unlock() - - if utilfeature.DefaultFeatureGate.Enabled(features.ResourceHealthStatus) { - if len(podsToUpdate) > 0 { - select { - case m.update <- resourceupdates.Update{PodUIDs: podsToUpdate.UnsortedList()}: - default: - klog.ErrorS(goerrors.New("device update channel is full"), "discard pods info", "podsToUpdate", podsToUpdate.UnsortedList()) - } - } - } - - if err := m.writeCheckpoint(); err != nil { - klog.ErrorS(err, "Writing checkpoint encountered") - } - klog.V(2).InfoS("Processed device updates for resource", "resourceName", resourceName, "totalCount", len(devices), "healthyCount", healthyCount) -} - -// GetWatcherHandler returns the plugin handler -func (m *ManagerImpl) GetWatcherHandler() cache.PluginHandler { - return m.server -} - -// GetHealthChecker returns the plugin handler -func (m *ManagerImpl) GetHealthChecker() healthz.HealthChecker { - return m.server -} - -// checkpointFile returns device plugin checkpoint file path. -func (m *ManagerImpl) checkpointFile() string { - return filepath.Join(m.checkpointdir, kubeletDeviceManagerCheckpoint) -} - -// Start starts the Device Plugin Manager and start initialization of -// podDevices and allocatedDevices information from checkpointed state and -// starts device plugin registration service. -func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error { - klog.V(2).InfoS("Starting Device Plugin manager") - - m.activePods = activePods - m.sourcesReady = sourcesReady - m.containerMap = initialContainers - m.containerRunningSet = initialContainerRunningSet - - // Loads in allocatedDevices information from disk. - err := m.readCheckpoint() - if err != nil { - klog.ErrorS(err, "Continue after failing to read checkpoint file. 
Device allocation info may NOT be up-to-date") - } - - return m.server.Start() -} - -// Stop is the function that can stop the plugin server. -// Can be called concurrently, more than once, and is safe to call -// without a prior Start. -func (m *ManagerImpl) Stop() error { - return m.server.Stop() -} - -// Allocate is the call that you can use to allocate a set of devices -// from the registered device plugins. -func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error { - if _, ok := m.devicesToReuse[string(pod.UID)]; !ok { - m.devicesToReuse[string(pod.UID)] = make(map[string]sets.Set[string]) - } - // If pod entries to m.devicesToReuse other than the current pod exist, delete them. - for podUID := range m.devicesToReuse { - if podUID != string(pod.UID) { - delete(m.devicesToReuse, podUID) - } - } - // Allocate resources for init containers first as we know the caller always loops - // through init containers before looping through app containers. Should the caller - // ever change those semantics, this logic will need to be amended. - for _, initContainer := range pod.Spec.InitContainers { - if container.Name == initContainer.Name { - if err := m.allocateContainerResources(pod, container, m.devicesToReuse[string(pod.UID)]); err != nil { - return err - } - if !podutil.IsRestartableInitContainer(&initContainer) { - m.podDevices.addContainerAllocatedResources(string(pod.UID), container.Name, m.devicesToReuse[string(pod.UID)]) - } else { - // If the init container is restartable, we need to keep the - // devices allocated. In other words, we should remove them - // from the devicesToReuse. - m.podDevices.removeContainerAllocatedResources(string(pod.UID), container.Name, m.devicesToReuse[string(pod.UID)]) - } - return nil - } - } - if err := m.allocateContainerResources(pod, container, m.devicesToReuse[string(pod.UID)]); err != nil { - return err - } - m.podDevices.removeContainerAllocatedResources(string(pod.UID), container.Name, m.devicesToReuse[string(pod.UID)]) - return nil -} - -// UpdatePluginResources updates node resources based on devices already allocated to pods. -func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { - pod := attrs.Pod - - // quick return if no pluginResources requested - if !m.podDevices.hasPod(string(pod.UID)) { - return nil - } - - m.sanitizeNodeAllocatable(node) - return nil -} - -func (m *ManagerImpl) markResourceUnhealthy(resourceName string) { - klog.V(2).InfoS("Mark all resources Unhealthy for resource", "resourceName", resourceName) - healthyDevices := sets.New[string]() - if _, ok := m.healthyDevices[resourceName]; ok { - healthyDevices = m.healthyDevices[resourceName] - m.healthyDevices[resourceName] = sets.New[string]() - } - if _, ok := m.unhealthyDevices[resourceName]; !ok { - m.unhealthyDevices[resourceName] = sets.New[string]() - } - m.unhealthyDevices[resourceName] = m.unhealthyDevices[resourceName].Union(healthyDevices) -} - -// GetCapacity is expected to be called when Kubelet updates its node status. -// The first returned variable contains the registered device plugin resource capacity. -// The second returned variable contains the registered device plugin resource allocatable. -// The third returned variable contains previously registered resources that are no longer active. -// Kubelet uses this information to update resource capacity/allocatable in its node status. 
-// After the call, the device plugin can remove the inactive resources from its internal list as the
-// change is already reflected in Kubelet node status.
-// Note in the special case after Kubelet restarts, device plugin resource capacities can
-// temporarily drop to zero till corresponding device plugins re-register. This is OK because
-// cm.UpdatePluginResource() runs during predicate Admit and guarantees we adjust nodeinfo
-// capacity for already allocated pods so that they can continue to run. However, new pods
-// requiring device plugin resources will not be scheduled till device plugin re-registers.
-func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) {
-	needsUpdateCheckpoint := false
-	var capacity = v1.ResourceList{}
-	var allocatable = v1.ResourceList{}
-	deletedResources := sets.New[string]()
-	m.mutex.Lock()
-	for resourceName, devices := range m.healthyDevices {
-		eI, ok := m.endpoints[resourceName]
-		if (ok && eI.e.stopGracePeriodExpired()) || !ok {
-			// The resources contained in endpoints and (un)healthyDevices
-			// should always be consistent. Otherwise, we run with the risk
-			// of failing to garbage collect non-existing resources or devices.
-			if !ok {
-				klog.InfoS("Unexpected: healthyDevices and endpoints are out of sync")
-			}
-			delete(m.endpoints, resourceName)
-			delete(m.healthyDevices, resourceName)
-			deletedResources.Insert(resourceName)
-			needsUpdateCheckpoint = true
-		} else {
-			capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI)
-			allocatable[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI)
-		}
-	}
-	for resourceName, devices := range m.unhealthyDevices {
-		eI, ok := m.endpoints[resourceName]
-		if (ok && eI.e.stopGracePeriodExpired()) || !ok {
-			if !ok {
-				klog.InfoS("Unexpected: unhealthyDevices and endpoints became out of sync")
-			}
-			delete(m.endpoints, resourceName)
-			delete(m.unhealthyDevices, resourceName)
-			deletedResources.Insert(resourceName)
-			needsUpdateCheckpoint = true
-		} else {
-			capacityCount := capacity[v1.ResourceName(resourceName)]
-			unhealthyCount := *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI)
-			capacityCount.Add(unhealthyCount)
-			capacity[v1.ResourceName(resourceName)] = capacityCount
-		}
-	}
-	m.mutex.Unlock()
-	if needsUpdateCheckpoint {
-		if err := m.writeCheckpoint(); err != nil {
-			klog.ErrorS(err, "Failed to write checkpoint file")
-		}
-	}
-	return capacity, allocatable, deletedResources.UnsortedList()
-}
-
-// Checkpoints device to container allocation information to disk.
-func (m *ManagerImpl) writeCheckpoint() error {
-	m.mutex.Lock()
-	registeredDevs := make(map[string][]string)
-	for resource, devices := range m.healthyDevices {
-		registeredDevs[resource] = devices.UnsortedList()
-	}
-	data := checkpoint.New(m.podDevices.toCheckpointData(),
-		registeredDevs)
-	m.mutex.Unlock()
-	err := m.checkpointManager.CreateCheckpoint(kubeletDeviceManagerCheckpoint, data)
-	if err != nil {
-		err2 := fmt.Errorf("failed to write checkpoint file %q: %v", kubeletDeviceManagerCheckpoint, err)
-		klog.ErrorS(err, "Failed to write checkpoint file")
-		return err2
-	}
-	klog.V(4).InfoS("Checkpoint file written", "checkpoint", kubeletDeviceManagerCheckpoint)
-	return nil
-}
-
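The write/read pair around this point is a plain serialize-on-update, restore-on-start cycle: writeCheckpoint persists pod-to-device entries plus registered device IDs, and readCheckpoint replays them after a kubelet restart. A minimal round-trip sketch under those assumptions (checkpointPayload is a hypothetical stand-in; the real checkpoint also carries a checksum, omitted here):

package main

import (
	"encoding/json"
	"fmt"
)

// checkpointPayload is an illustrative stand-in for the devicemanager
// checkpoint data: pod-to-device entries plus registered device IDs.
type checkpointPayload struct {
	PodDeviceEntries  []string
	RegisteredDevices map[string][]string
}

func main() {
	// Serialize on every update (writeCheckpoint)...
	out := checkpointPayload{
		PodDeviceEntries:  []string{"pod-a/ctr-0/example.com/gpu"},
		RegisteredDevices: map[string][]string{"example.com/gpu": {"gpu-0", "gpu-1"}},
	}
	blob, err := json.Marshal(out)
	if err != nil {
		panic(err)
	}
	// ...and re-read on kubelet start (readCheckpoint).
	var in checkpointPayload
	if err := json.Unmarshal(blob, &in); err != nil {
		panic(err)
	}
	fmt.Println(in.RegisteredDevices["example.com/gpu"]) // [gpu-0 gpu-1]
}

-// Reads device to container allocation information from disk, and populates
-// m.allocatedDevices accordingly.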
-func (m *ManagerImpl) readCheckpoint() error { - cp, err := m.getCheckpoint() - if err != nil { - if err == errors.ErrCheckpointNotFound { - // no point in trying anything else - klog.ErrorS(err, "Failed to read data from checkpoint", "checkpoint", kubeletDeviceManagerCheckpoint) - return nil - } - return err - } - - m.mutex.Lock() - defer m.mutex.Unlock() - podDevices, registeredDevs := cp.GetData() - m.podDevices.fromCheckpointData(podDevices) - m.allocatedDevices = m.podDevices.devices() - for resource := range registeredDevs { - // During start up, creates empty healthyDevices list so that the resource capacity - // will stay zero till the corresponding device plugin re-registers. - m.healthyDevices[resource] = sets.New[string]() - m.unhealthyDevices[resource] = sets.New[string]() - m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil} - } - - klog.V(4).InfoS("Read data from checkpoint file", "checkpoint", kubeletDeviceManagerCheckpoint) - return nil -} - -func (m *ManagerImpl) getCheckpoint() (checkpoint.DeviceManagerCheckpoint, error) { - registeredDevs := make(map[string][]string) - devEntries := make([]checkpoint.PodDevicesEntry, 0) - cp := checkpoint.New(devEntries, registeredDevs) - err := m.checkpointManager.GetCheckpoint(kubeletDeviceManagerCheckpoint, cp) - return cp, err -} - -// UpdateAllocatedDevices frees any Devices that are bound to terminated pods. -func (m *ManagerImpl) UpdateAllocatedDevices() { - activePods := m.activePods() - if !m.sourcesReady.AllReady() { - return - } - m.mutex.Lock() - defer m.mutex.Unlock() - podsToBeRemoved := m.podDevices.pods() - for _, pod := range activePods { - podsToBeRemoved.Delete(string(pod.UID)) - } - if len(podsToBeRemoved) <= 0 { - return - } - klog.V(3).InfoS("Pods to be removed", "podUIDs", sets.List(podsToBeRemoved)) - m.podDevices.delete(sets.List(podsToBeRemoved)) - // Regenerated allocatedDevices after we update pod allocation information. - m.allocatedDevices = m.podDevices.devices() -} - -// Returns list of device Ids we need to allocate with Allocate rpc call. -// Returns empty list in case we don't need to issue the Allocate rpc call. -func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.Set[string]) (sets.Set[string], error) { - m.mutex.Lock() - defer m.mutex.Unlock() - needed := required - // Gets list of devices that have already been allocated. - // This can happen if a container restarts for example. - devices := m.podDevices.containerDevices(podUID, contName, resource) - if devices != nil { - klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", podUID, "devices", sets.List(devices)) - needed = needed - devices.Len() - // A pod's resource is not expected to change once admitted by the API server, - // so just fail loudly here. We can revisit this part if this no longer holds. - if needed != 0 { - return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", podUID, contName, resource, devices.Len(), required) - } - } - - // We have 3 major flows to handle: - // 1. kubelet running, normal allocation (needed > 0, container being [re]created). Steady state and most common case by far and large. - // 2. kubelet restart. In this scenario every other component of the stack (device plugins, app container, runtime) is still running. - // 3. node reboot. 
In this scenario device plugins may not be running yet when we try to allocate devices. - // note: if we get this far the runtime is surely running. This is usually enforced at OS level by startup system services dependencies. - - // First we take care of the exceptional flow (scenarios 2 and 3). In both flows, kubelet is reinitializing, and while kubelet is initializing, sources are NOT all ready. - // Is this a simple kubelet restart (scenario 2)? To distinguish, we use the information we got for runtime. If we are asked to allocate devices for containers reported - // running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. Then, if the container was running, it can only be - // because it already has access to all the required devices, so we got nothing to do and we can bail out. - if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) { - klog.V(3).InfoS("Container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName) - return nil, nil - } - - // We dealt with scenario 2. If we got this far it's either scenario 3 (node reboot) or scenario 1 (steady state, normal flow). - klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName) - healthyDevices, hasRegistered := m.healthyDevices[resource] - - // The following checks are expected to fail only happen on scenario 3 (node reboot). - // The kubelet is reinitializing and got a container from sources. But there's no ordering, so an app container may attempt allocation _before_ the device plugin was created, - // has registered and reported back to kubelet the devices. - // This can only happen on scenario 3 because at steady state (scenario 1) the scheduler prevents pod to be sent towards node which don't report enough devices. - // Note: we need to check the device health and registration status *before* we check how many devices are needed, doing otherwise caused issue #109595 - // Note: if the scheduler is bypassed, we fall back in scenario 1, so we still need these checks. - if !hasRegistered { - return nil, fmt.Errorf("cannot allocate unregistered device %s", resource) - } - - // Check if registered resource has healthy devices - if healthyDevices.Len() == 0 { - return nil, fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resource) - } - - // Check if all the previously allocated devices are healthy - if !healthyDevices.IsSuperset(devices) { - return nil, fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resource) - } - - // We handled the known error paths in scenario 3 (node reboot), so from now on we can fall back in a common path. - // We cover container restart on kubelet steady state with the same flow. - if needed == 0 { - klog.V(3).InfoS("No devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName) - // No change, no work. - return nil, nil - } - - // Declare the list of allocated devices. - // This will be populated and returned below. - allocated := sets.New[string]() - - // Create a closure to help with device allocation - // Returns 'true' once no more devices need to be allocated. 
- allocateRemainingFrom := func(devices sets.Set[string]) bool { - // When we call callGetPreferredAllocationIfAvailable below, we will release - // the lock and call the device plugin. If someone calls ListResource concurrently, - // device manager will recalculate the allocatedDevices map. Some entries with - // empty sets may be removed, so we reinit here. - if m.allocatedDevices[resource] == nil { - m.allocatedDevices[resource] = sets.New[string]() - } - for device := range devices.Difference(allocated) { - m.allocatedDevices[resource].Insert(device) - allocated.Insert(device) - needed-- - if needed == 0 { - return true - } - } - return false - } - - // Allocates from reusableDevices list first. - if allocateRemainingFrom(reusableDevices) { - return allocated, nil - } - - // Gets Devices in use. - devicesInUse := m.allocatedDevices[resource] - // Gets Available devices. - available := m.healthyDevices[resource].Difference(devicesInUse) - if available.Len() < needed { - return nil, fmt.Errorf("requested number of devices unavailable for %s. Requested: %d, Available: %d", resource, needed, available.Len()) - } - - // Filters available Devices based on NUMA affinity. - aligned, unaligned, noAffinity := m.filterByAffinity(podUID, contName, resource, available) - - // If we can allocate all remaining devices from the set of aligned ones, then - // give the plugin the chance to influence which ones to allocate from that set. - if needed < aligned.Len() { - // First allocate from the preferred devices list (if available). - preferred, err := m.callGetPreferredAllocationIfAvailable(podUID, contName, resource, aligned.Union(allocated), allocated, required) - if err != nil { - return nil, err - } - if allocateRemainingFrom(preferred.Intersection(aligned)) { - return allocated, nil - } - // Then fallback to allocate from the aligned set if no preferred list - // is returned (or not enough devices are returned in that list). - if allocateRemainingFrom(aligned) { - return allocated, nil - } - - return nil, fmt.Errorf("unexpectedly allocated less resources than required. Requested: %d, Got: %d", required, required-needed) - } - - // If we can't allocate all remaining devices from the set of aligned ones, - // then start by first allocating all the aligned devices (to ensure - // that the alignment guaranteed by the TopologyManager is honored). - if allocateRemainingFrom(aligned) { - return allocated, nil - } - - // Then give the plugin the chance to influence the decision on any - // remaining devices to allocate. - preferred, err := m.callGetPreferredAllocationIfAvailable(podUID, contName, resource, available.Union(allocated), allocated, required) - if err != nil { - return nil, err - } - if allocateRemainingFrom(preferred.Intersection(available)) { - return allocated, nil - } - - // Finally, if the plugin did not return a preferred allocation (or didn't - // return a large enough one), then fall back to allocating the remaining - // devices from the 'unaligned' and 'noAffinity' sets. - if allocateRemainingFrom(unaligned) { - return allocated, nil - } - if allocateRemainingFrom(noAffinity) { - return allocated, nil - } - - return nil, fmt.Errorf("unexpectedly allocated less resources than required. Requested: %d, Got: %d", required, required-needed) -} - -func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.Set[string]) (sets.Set[string], sets.Set[string], sets.Set[string]) { - // If alignment information is not available, just pass the available list back. 
- hint := m.topologyAffinityStore.GetAffinity(podUID, contName) - if !m.deviceHasTopologyAlignment(resource) || hint.NUMANodeAffinity == nil { - return sets.New[string](), sets.New[string](), available - } - - // Build a map of NUMA Nodes to the devices associated with them. A - // device may be associated to multiple NUMA nodes at the same time. If an - // available device does not have any NUMA Nodes associated with it, add it - // to a list of NUMA Nodes for the fake NUMANode -1. - perNodeDevices := make(map[int]sets.Set[string]) - for d := range available { - if m.allDevices[resource][d].Topology == nil || len(m.allDevices[resource][d].Topology.Nodes) == 0 { - if _, ok := perNodeDevices[nodeWithoutTopology]; !ok { - perNodeDevices[nodeWithoutTopology] = sets.New[string]() - } - perNodeDevices[nodeWithoutTopology].Insert(d) - continue - } - - for _, node := range m.allDevices[resource][d].Topology.Nodes { - if _, ok := perNodeDevices[int(node.ID)]; !ok { - perNodeDevices[int(node.ID)] = sets.New[string]() - } - perNodeDevices[int(node.ID)].Insert(d) - } - } - - // Get a flat list of all the nodes associated with available devices. - var nodes []int - for node := range perNodeDevices { - nodes = append(nodes, node) - } - - // Sort the list of nodes by: - // 1) Nodes contained in the 'hint's affinity set - // 2) Nodes not contained in the 'hint's affinity set - // 3) The fake NUMANode of -1 (assuming it is included in the list) - // Within each of the groups above, sort the nodes by how many devices they contain - sort.Slice(nodes, func(i, j int) bool { - // If one or the other of nodes[i] or nodes[j] is in the 'hint's affinity set - if hint.NUMANodeAffinity.IsSet(nodes[i]) && hint.NUMANodeAffinity.IsSet(nodes[j]) { - return perNodeDevices[nodes[i]].Len() < perNodeDevices[nodes[j]].Len() - } - if hint.NUMANodeAffinity.IsSet(nodes[i]) { - return true - } - if hint.NUMANodeAffinity.IsSet(nodes[j]) { - return false - } - - // If one or the other of nodes[i] or nodes[j] is the fake NUMA node -1 (they can't both be) - if nodes[i] == nodeWithoutTopology { - return false - } - if nodes[j] == nodeWithoutTopology { - return true - } - - // Otherwise both nodes[i] and nodes[j] are real NUMA nodes that are not in the 'hint's' affinity list. - return perNodeDevices[nodes[i]].Len() < perNodeDevices[nodes[j]].Len() - }) - - // Generate three sorted lists of devices. Devices in the first list come - // from valid NUMA Nodes contained in the affinity mask. Devices in the - // second list come from valid NUMA Nodes not in the affinity mask. Devices - // in the third list come from devices with no NUMA Node association (i.e. - // those mapped to the fake NUMA Node -1). Because we loop through the - // sorted list of NUMA nodes in order, within each list, devices are sorted - // by their connection to NUMA Nodes with more devices on them. - var fromAffinity []string - var notFromAffinity []string - var withoutTopology []string - for d := range available { - // Since the same device may be associated with multiple NUMA Nodes. We - // need to be careful not to add each device to multiple lists. The - // logic below ensures this by breaking after the first NUMA node that - // has the device is encountered. 
- for _, n := range nodes { - if perNodeDevices[n].Has(d) { - if n == nodeWithoutTopology { - withoutTopology = append(withoutTopology, d) - } else if hint.NUMANodeAffinity.IsSet(n) { - fromAffinity = append(fromAffinity, d) - } else { - notFromAffinity = append(notFromAffinity, d) - } - break - } - } - } - - // Return all three lists containing the full set of devices across them. - return sets.New[string](fromAffinity...), sets.New[string](notFromAffinity...), sets.New[string](withoutTopology...) -} - -// allocateContainerResources attempts to allocate all of required device -// plugin resources for the input container, issues an Allocate rpc request -// for each new device resource requirement, processes their AllocateResponses, -// and updates the cached containerDevices on success. -func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.Set[string]) error { - podUID := string(pod.UID) - contName := container.Name - allocatedDevicesUpdated := false - needsUpdateCheckpoint := false - // Extended resources are not allowed to be overcommitted. - // Since device plugin advertises extended resources, - // therefore Requests must be equal to Limits and iterating - // over the Limits should be sufficient. - for k, v := range container.Resources.Limits { - resource := string(k) - needed := int(v.Value()) - klog.V(3).InfoS("Looking for needed resources", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "needed", needed) - if !m.isDevicePluginResource(resource) { - continue - } - // Updates allocatedDevices to garbage collect any stranded resources - // before doing the device plugin allocation. - if !allocatedDevicesUpdated { - m.UpdateAllocatedDevices() - allocatedDevicesUpdated = true - } - allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource]) - if err != nil { - return err - } - if allocDevices == nil || len(allocDevices) <= 0 { - continue - } - - needsUpdateCheckpoint = true - - startRPCTime := time.Now() - // Manager.Allocate involves RPC calls to device plugin, which - // could be heavy-weight. Therefore we want to perform this operation outside - // mutex lock. Note if Allocate call fails, we may leave container resources - // partially allocated for the failed container. We rely on UpdateAllocatedDevices() - // to garbage collect these resources later. Another side effect is that if - // we have X resource A and Y resource B in total, and two containers, container1 - // and container2 both require X resource A and Y resource B. Both allocation - // requests may fail if we serve them in mixed order. - // TODO: may revisit this part later if we see inefficient resource allocation - // in real use as the result of this. Should also consider to parallelize device - // plugin Allocate grpc calls if it becomes common that a container may require - // resources from multiple device plugins. - m.mutex.Lock() - eI, ok := m.endpoints[resource] - m.mutex.Unlock() - if !ok { - m.mutex.Lock() - m.allocatedDevices = m.podDevices.devices() - m.mutex.Unlock() - return fmt.Errorf("unknown Device Plugin %s", resource) - } - - devs := allocDevices.UnsortedList() - // TODO: refactor this part of code to just append a ContainerAllocationRequest - // in a passed in AllocateRequest pointer, and issues a single Allocate call per pod. 
- klog.V(4).InfoS("Making allocation request for device plugin", "devices", devs, "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name) - resp, err := eI.e.allocate(devs) - metrics.DevicePluginAllocationDuration.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime)) - if err != nil { - // In case of allocation failure, we want to restore m.allocatedDevices - // to the actual allocated state from m.podDevices. - m.mutex.Lock() - m.allocatedDevices = m.podDevices.devices() - m.mutex.Unlock() - return err - } - - if len(resp.ContainerResponses) == 0 { - return fmt.Errorf("no containers return in allocation response %v", resp) - } - - allocDevicesWithNUMA := checkpoint.NewDevicesPerNUMA() - // Update internal cached podDevices state. - m.mutex.Lock() - for dev := range allocDevices { - if m.allDevices[resource][dev].Topology == nil || len(m.allDevices[resource][dev].Topology.Nodes) == 0 { - allocDevicesWithNUMA[nodeWithoutTopology] = append(allocDevicesWithNUMA[nodeWithoutTopology], dev) - continue - } - for idx := range m.allDevices[resource][dev].Topology.Nodes { - node := m.allDevices[resource][dev].Topology.Nodes[idx] - allocDevicesWithNUMA[node.ID] = append(allocDevicesWithNUMA[node.ID], dev) - } - } - m.mutex.Unlock() - m.podDevices.insert(podUID, contName, resource, allocDevicesWithNUMA, resp.ContainerResponses[0]) - } - - if needsUpdateCheckpoint { - return m.writeCheckpoint() - } - - return nil -} - -// checkPodActive checks if the given pod is still in activePods list -func (m *ManagerImpl) checkPodActive(pod *v1.Pod) bool { - activePods := m.activePods() - for _, activePod := range activePods { - if activePod.UID == pod.UID { - return true - } - } - - return false -} - -// GetDeviceRunContainerOptions checks whether we have cached containerDevices -// for the passed-in and returns its DeviceRunContainerOptions -// for the found one. An empty struct is returned in case no cached state is found. -func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*DeviceRunContainerOptions, error) { - podUID := string(pod.UID) - contName := container.Name - needsReAllocate := false - for k, v := range container.Resources.Limits { - resource := string(k) - if !m.isDevicePluginResource(resource) || v.Value() == 0 { - continue - } - err := m.callPreStartContainerIfNeeded(podUID, contName, resource) - if err != nil { - return nil, err - } - - if !m.checkPodActive(pod) { - klog.V(5).InfoS("Pod deleted from activePods, skip to reAllocate", "pod", klog.KObj(pod), "podUID", podUID, "containerName", container.Name) - continue - } - - // This is a device plugin resource yet we don't have cached - // resource state. This is likely due to a race during node - // restart. We re-issue allocate request to cover this race. - if m.podDevices.containerDevices(podUID, contName, resource) == nil { - needsReAllocate = true - } - } - if needsReAllocate { - klog.V(2).InfoS("Needs to re-allocate device plugin resources for pod", "pod", klog.KObj(pod), "containerName", container.Name) - if err := m.Allocate(pod, container); err != nil { - return nil, err - } - } - return m.podDevices.deviceRunContainerOptions(string(pod.UID), container.Name), nil -} - -// callPreStartContainerIfNeeded issues PreStartContainer grpc call for device plugin resource -// with PreStartRequired option set. 
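Editor's note: the failure path above restores m.allocatedDevices wholesale from m.podDevices instead of undoing individual insertions. A hedged sketch of that recompute-from-source-of-truth rollback, with hypothetical types standing in for the kubelet's:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type manager struct {
	mu        sync.Mutex
	allocated map[string]map[string]bool // in-flight view: resource -> device set
	committed map[string]map[string]bool // durable view: resource -> device set
}

func copySets(src map[string]map[string]bool) map[string]map[string]bool {
	out := make(map[string]map[string]bool, len(src))
	for r, devs := range src {
		out[r] = make(map[string]bool, len(devs))
		for d := range devs {
			out[r][d] = true
		}
	}
	return out
}

func (m *manager) allocate(resource, device string, rpc func() error) error {
	m.mu.Lock()
	if m.allocated[resource] == nil {
		m.allocated[resource] = map[string]bool{}
	}
	m.allocated[resource][device] = true // optimistically reserve
	m.mu.Unlock()

	if err := rpc(); err != nil { // plugin Allocate call, made outside the lock
		m.mu.Lock()
		m.allocated = copySets(m.committed) // rollback: rebuild from the source of truth
		m.mu.Unlock()
		return err
	}

	m.mu.Lock()
	if m.committed[resource] == nil {
		m.committed[resource] = map[string]bool{}
	}
	m.committed[resource][device] = true // commit only after the RPC succeeds
	m.mu.Unlock()
	return nil
}

func main() {
	m := &manager{allocated: map[string]map[string]bool{}, committed: map[string]map[string]bool{}}
	_ = m.allocate("vendor.com/gpu", "dev-0", func() error { return nil })
	err := m.allocate("vendor.com/gpu", "dev-1", func() error { return errors.New("plugin down") })
	fmt.Println(err, m.allocated) // dev-1 reservation rolled back; dev-0 kept
}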
-func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource string) error { - m.mutex.Lock() - eI, ok := m.endpoints[resource] - if !ok { - m.mutex.Unlock() - return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource) - } - - if eI.opts == nil || !eI.opts.PreStartRequired { - m.mutex.Unlock() - klog.V(5).InfoS("Plugin options indicate to skip PreStartContainer for resource", "podUID", podUID, "resourceName", resource, "containerName", contName) - return nil - } - - devices := m.podDevices.containerDevices(podUID, contName, resource) - if devices == nil { - m.mutex.Unlock() - return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", podUID, contName, resource) - } - - m.mutex.Unlock() - devs := devices.UnsortedList() - klog.V(4).InfoS("Issuing a PreStartContainer call for container", "containerName", contName, "podUID", podUID) - _, err := eI.e.preStartContainer(devs) - if err != nil { - return fmt.Errorf("device plugin PreStartContainer rpc failed with err: %v", err) - } - // TODO: Add metrics support for init RPC - return nil -} - -// callGetPreferredAllocationIfAvailable issues GetPreferredAllocation grpc -// call for device plugin resource with GetPreferredAllocationAvailable option set. -func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.Set[string], size int) (sets.Set[string], error) { - eI, ok := m.endpoints[resource] - if !ok { - return nil, fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource) - } - - if eI.opts == nil || !eI.opts.GetPreferredAllocationAvailable { - klog.V(5).InfoS("Plugin options indicate to skip GetPreferredAllocation for resource", "resourceName", resource, "podUID", podUID, "containerName", contName) - return nil, nil - } - - m.mutex.Unlock() - klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "resourceName", resource, "containerName", contName, "podUID", podUID) - resp, err := eI.e.getPreferredAllocation(available.UnsortedList(), mustInclude.UnsortedList(), size) - m.mutex.Lock() - if err != nil { - return nil, fmt.Errorf("device plugin GetPreferredAllocation rpc failed with err: %v", err) - } - if resp != nil && len(resp.ContainerResponses) > 0 { - return sets.New[string](resp.ContainerResponses[0].DeviceIDs...), nil - } - return sets.New[string](), nil -} - -// sanitizeNodeAllocatable scans through allocatedDevices in the device manager -// and if necessary, updates allocatableResource in nodeInfo to at least equal to -// the allocated capacity. This allows pods that have already been scheduled on -// the node to pass GeneralPredicates admission checking even upon device plugin failure. -func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulerframework.NodeInfo) { - var newAllocatableResource *schedulerframework.Resource - allocatableResource := node.Allocatable - if allocatableResource.ScalarResources == nil { - allocatableResource.ScalarResources = make(map[v1.ResourceName]int64) - } - - m.mutex.Lock() - defer m.mutex.Unlock() - for resource, devices := range m.allocatedDevices { - needed := devices.Len() - quant, ok := allocatableResource.ScalarResources[v1.ResourceName(resource)] - if ok && int(quant) >= needed { - continue - } - // Needs to update nodeInfo.AllocatableResource to make sure - // NodeInfo.allocatableResource at least equal to the capacity already allocated. 
- if newAllocatableResource == nil { - newAllocatableResource = allocatableResource.Clone() - } - newAllocatableResource.ScalarResources[v1.ResourceName(resource)] = int64(needed) - } - if newAllocatableResource != nil { - node.Allocatable = newAllocatableResource - } -} - -func (m *ManagerImpl) isDevicePluginResource(resource string) bool { - m.mutex.Lock() - defer m.mutex.Unlock() - _, registeredResource := m.healthyDevices[resource] - _, allocatedResource := m.allocatedDevices[resource] - // Return true if this is either an active device plugin resource or - // a resource we have previously allocated. - if registeredResource || allocatedResource { - return true - } - return false -} - -// GetAllocatableDevices returns information about all the healthy devices known to the manager -func (m *ManagerImpl) GetAllocatableDevices() ResourceDeviceInstances { - m.mutex.Lock() - defer m.mutex.Unlock() - resp := m.allDevices.Filter(m.healthyDevices) - klog.V(4).InfoS("GetAllocatableDevices", "known", len(m.allDevices), "allocatable", len(resp)) - return resp -} - -// GetDevices returns the devices used by the specified container -func (m *ManagerImpl) GetDevices(podUID, containerName string) ResourceDeviceInstances { - return m.podDevices.getContainerDevices(podUID, containerName) -} - -func (m *ManagerImpl) UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) { - m.mutex.Lock() - defer m.mutex.Unlock() - - // Today we ignore edge cases that are not likely to happen: - // - update statuses for containers that are in spec, but not in status - // - update statuses for resources requested in spec, but with no information in podDevices - for i, containerStatus := range status.ContainerStatuses { - devices := m.podDevices.getContainerDevices(string(pod.UID), containerStatus.Name) - - for resourceName, deviceInstances := range devices { - for id, d := range deviceInstances { - health := pluginapi.Healthy - // this is unlikely, but check for existence here anyways - if r, ok := m.allDevices[resourceName]; ok { - if _, ok := r[id]; ok { - health = m.allDevices[resourceName][id].Health - } - } - - d.Health = health - - deviceInstances[id] = d - } - } - - for resourceName, dI := range devices { - resourceStatus := v1.ResourceStatus{ - Name: v1.ResourceName(resourceName), - Resources: []v1.ResourceHealth{}, - } - - for id, d := range dI { - health := v1.ResourceHealthStatusHealthy - if d.Health != pluginapi.Healthy { - health = v1.ResourceHealthStatusUnhealthy - } - resourceStatus.Resources = append(resourceStatus.Resources, v1.ResourceHealth{ - ResourceID: v1.ResourceID(id), - Health: health, - }) - } - - if status.ContainerStatuses[i].AllocatedResourcesStatus == nil { - status.ContainerStatuses[i].AllocatedResourcesStatus = []v1.ResourceStatus{} - } - - // look up the resource status by name and update it - found := false - for j, rs := range status.ContainerStatuses[i].AllocatedResourcesStatus { - if rs.Name == resourceStatus.Name { - status.ContainerStatuses[i].AllocatedResourcesStatus[j] = resourceStatus - found = true - break - } - } - - if !found { - status.ContainerStatuses[i].AllocatedResourcesStatus = append(status.ContainerStatuses[i].AllocatedResourcesStatus, resourceStatus) - } - } - } -} - -// ShouldResetExtendedResourceCapacity returns whether the extended resources should be zeroed or not, -// depending on whether the node has been recreated. Absence of the checkpoint file strongly indicates the node -// has been recreated. 
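Editor's note: the heuristic documented above — no checkpoint files strongly suggests a freshly recreated node — reduces to a directory scan. A small sketch under that assumption; the directory path and the conservative error handling are illustrative:

package main

import (
	"fmt"
	"os"
)

// shouldResetExtendedResources mirrors the heuristic above: if the checkpoint
// directory holds no checkpoints at all, the node was likely recreated, so
// previously exported extended resources should be zeroed.
func shouldResetExtendedResources(checkpointDir string) bool {
	entries, err := os.ReadDir(checkpointDir)
	if err != nil {
		return false // be conservative when the state is unknown
	}
	return len(entries) == 0
}

func main() {
	dir, err := os.MkdirTemp("", "ckpts")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fmt.Println(shouldResetExtendedResources(dir)) // true: no checkpoints written yet
}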
-func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool { - checkpoints, err := m.checkpointManager.ListCheckpoints() - if err != nil { - return false - } - return len(checkpoints) == 0 -} - -func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool { - cntID, err := m.containerMap.GetContainerID(podUID, cntName) - if err != nil { - klog.ErrorS(err, "Container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName) - return false - } - - // note that if container runtime is down when kubelet restarts, this set will be empty, - // so on kubelet restart containers will again fail admission, hitting https://github.com/kubernetes/kubernetes/issues/118559 again. - // This scenario should however be rare enough. - if !m.containerRunningSet.Has(cntID) { - klog.V(4).InfoS("Container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID) - return false - } - - // Once we make it here we know we have a running container. - klog.V(4).InfoS("Container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID) - return true -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/api.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/api.go deleted file mode 100644 index f84cd551ffd..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/api.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" -) - -// RegistrationHandler is an interface for handling device plugin registration -// and plugin directory cleanup. -type RegistrationHandler interface { - CleanupPluginDirectory(string) error -} - -// ClientHandler is an interface for handling device plugin connections. -type ClientHandler interface { - PluginConnected(string, DevicePlugin) error - PluginDisconnected(string) - PluginListAndWatchReceiver(string, *api.ListAndWatchResponse) -} - -// TODO: evaluate whether we need these error definitions. -const ( - // errFailedToDialDevicePlugin is the error raised when the device plugin could not be - // reached on the registered socket - errFailedToDialDevicePlugin = "failed to dial device plugin:" - // errUnsupportedVersion is the error raised when the device plugin uses an API version not - // supported by the Kubelet registry - errUnsupportedVersion = "requested API version %q is not supported by kubelet. 
Supported version is %q" - // errInvalidResourceName is the error raised when a device plugin is registering - // itself with an invalid ResourceName - errInvalidResourceName = "the ResourceName %q is invalid" - // errBadSocket is the error raised when the registry socket path is not absolute - errBadSocket = "bad socketPath, must be an absolute path:" -) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go deleted file mode 100644 index 98359ab32c0..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - "fmt" - "net" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "k8s.io/klog/v2" - api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" -) - -// DevicePlugin interface provides methods for accessing Device Plugin resources, API and unix socket. -type DevicePlugin interface { - API() api.DevicePluginClient - Resource() string - SocketPath() string -} - -// Client interface provides methods for establishing/closing gRPC connection and running the device plugin gRPC client. -type Client interface { - Connect() error - Run() - Disconnect() error -} - -type client struct { - mutex sync.Mutex - resource string - socket string - grpc *grpc.ClientConn - handler ClientHandler - client api.DevicePluginClient -} - -// NewPluginClient returns an initialized device plugin client. -func NewPluginClient(r string, socketPath string, h ClientHandler) Client { - return &client{ - resource: r, - socket: socketPath, - handler: h, - } -} - -// Connect is for establishing a gRPC connection between device manager and device plugin. -func (c *client) Connect() error { - client, conn, err := dial(c.socket) - if err != nil { - klog.ErrorS(err, "Unable to connect to device plugin client with socket path", "path", c.socket) - return err - } - c.mutex.Lock() - c.grpc = conn - c.client = client - c.mutex.Unlock() - return c.handler.PluginConnected(c.resource, c) -} - -// Run is for running the device plugin gRPC client. -func (c *client) Run() { - stream, err := c.client.ListAndWatch(context.Background(), &api.Empty{}) - if err != nil { - klog.ErrorS(err, "ListAndWatch ended unexpectedly for device plugin", "resource", c.resource) - return - } - - for { - response, err := stream.Recv() - if err != nil { - klog.ErrorS(err, "ListAndWatch ended unexpectedly for device plugin", "resource", c.resource) - return - } - klog.V(2).InfoS("State pushed for device plugin", "resource", c.resource, "resourceCapacity", len(response.Devices)) - c.handler.PluginListAndWatchReceiver(c.resource, response) - } -} - -// Disconnect is for closing gRPC connection between device manager and device plugin. 
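Editor's note: a hedged usage sketch of the Client lifecycle defined in this file — Connect, then Run on its own goroutine (it blocks on the ListAndWatch stream), then Disconnect. It assumes it sits in this same package; noopHandler is a hypothetical stand-in for the device manager, which implements ClientHandler in production.

package v1beta1

import (
	"time"

	api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
)

// noopHandler is a hypothetical ClientHandler; in production the kubelet's
// device manager plays this role.
type noopHandler struct{}

func (noopHandler) PluginConnected(resource string, p DevicePlugin) error { return nil }
func (noopHandler) PluginDisconnected(resource string)                    {}
func (noopHandler) PluginListAndWatchReceiver(resource string, resp *api.ListAndWatchResponse) {
	// A real handler would fold resp.Devices into its healthy/unhealthy sets.
	_ = resp.Devices
}

// exampleClientLifecycle shows the intended call order; the socket path is illustrative.
func exampleClientLifecycle() {
	c := NewPluginClient("vendor.com/gpu", "/var/lib/kubelet/device-plugins/gpu.sock", noopHandler{})
	if err := c.Connect(); err != nil {
		return // plugin socket not up yet; registration normally triggers a retry
	}
	go c.Run() // blocks receiving ListAndWatch updates until the stream ends

	time.Sleep(100 * time.Millisecond) // stand-in for real work
	_ = c.Disconnect()
}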
-func (c *client) Disconnect() error { - c.mutex.Lock() - if c.grpc != nil { - if err := c.grpc.Close(); err != nil { - klog.V(2).ErrorS(err, "Failed to close grcp connection", "resource", c.Resource()) - } - c.grpc = nil - } - c.mutex.Unlock() - c.handler.PluginDisconnected(c.resource) - - klog.V(2).InfoS("Device plugin disconnected", "resource", c.resource) - return nil -} - -func (c *client) Resource() string { - return c.resource -} - -func (c *client) API() api.DevicePluginClient { - return c.client -} - -func (c *client) SocketPath() string { - return c.socket -} - -// dial establishes the gRPC communication with the registered device plugin. https://godoc.org/google.golang.org/grpc#Dial -func dial(unixSocketPath string) (api.DevicePluginClient, *grpc.ClientConn, error) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - c, err := grpc.DialContext(ctx, unixSocketPath, - grpc.WithAuthority("localhost"), - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, "unix", addr) - }), - ) - - if err != nil { - return nil, nil, fmt.Errorf(errFailedToDialDevicePlugin+" %v", err) - } - - return api.NewDevicePluginClient(c), c, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/handler.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/handler.go deleted file mode 100644 index 204afd3d1b1..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/handler.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "fmt" - "os" - "time" - - core "k8s.io/api/core/v1" - "k8s.io/klog/v2" - api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" -) - -func (s *server) GetPluginHandler() cache.PluginHandler { - if f, err := os.Create(s.socketDir + "DEPRECATION"); err != nil { - klog.ErrorS(err, "Failed to create deprecation file at socket dir", "path", s.socketDir) - } else { - f.Close() - klog.V(4).InfoS("Created deprecation file", "path", f.Name()) - } - return s -} - -func (s *server) RegisterPlugin(pluginName string, endpoint string, versions []string, pluginClientTimeout *time.Duration) error { - klog.V(2).InfoS("Registering plugin at endpoint", "plugin", pluginName, "endpoint", endpoint) - return s.connectClient(pluginName, endpoint) -} - -func (s *server) DeRegisterPlugin(pluginName, endpoint string) { - klog.V(2).InfoS("Deregistering plugin", "plugin", pluginName, "endpoint", endpoint) - client := s.getClient(pluginName) - if client != nil { - s.disconnectClient(pluginName, client) - } -} - -func (s *server) ValidatePlugin(pluginName string, endpoint string, versions []string) error { - klog.V(2).InfoS("Got plugin at endpoint with versions", "plugin", pluginName, "endpoint", endpoint, "versions", versions) - - if !s.isVersionCompatibleWithPlugin(versions...) { - return fmt.Errorf("manager version, %s, is not among plugin supported versions %v", api.Version, versions) - } - - if !v1helper.IsExtendedResourceName(core.ResourceName(pluginName)) { - return fmt.Errorf("invalid name of device plugin socket: %s", fmt.Sprintf(errInvalidResourceName, pluginName)) - } - - klog.V(2).InfoS("Device plugin validated", "plugin", pluginName, "endpoint", endpoint, "versions", versions) - return nil -} - -func (s *server) connectClient(name string, socketPath string) error { - c := NewPluginClient(name, socketPath, s.chandler) - - s.registerClient(name, c) - if err := c.Connect(); err != nil { - s.deregisterClient(name) - klog.ErrorS(err, "Failed to connect to new client", "resource", name) - return err - } - - klog.V(2).InfoS("Connected to new client", "resource", name) - go func() { - s.runClient(name, c) - }() - - return nil -} - -func (s *server) disconnectClient(name string, c Client) error { - s.deregisterClient(name) - return c.Disconnect() -} -func (s *server) registerClient(name string, c Client) { - s.mutex.Lock() - defer s.mutex.Unlock() - - s.clients[name] = c - klog.V(2).InfoS("Registered client", "name", name) -} - -func (s *server) deregisterClient(name string) { - s.mutex.Lock() - defer s.mutex.Unlock() - - delete(s.clients, name) - klog.V(2).InfoS("Deregistered client", "name", name) -} - -func (s *server) runClient(name string, c Client) { - c.Run() - - c = s.getClient(name) - if c == nil { - return - } - - if err := s.disconnectClient(name, c); err != nil { - klog.ErrorS(err, "Unable to disconnect client", "resource", name, "client", c) - } -} - -func (s *server) getClient(name string) Client { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.clients[name] -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go deleted file mode 100644 index fd37315810e..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "path/filepath" - "sync" - - "github.com/opencontainers/selinux/go-selinux" - "google.golang.org/grpc" - - core "k8s.io/api/core/v1" - "k8s.io/apiserver/pkg/server/healthz" - "k8s.io/klog/v2" - api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/kubelet/config" - "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" -) - -// Server interface provides methods for Device plugin registration server. -type Server interface { - cache.PluginHandler - healthz.HealthChecker - Start() error - Stop() error - SocketPath() string -} - -type server struct { - socketName string - socketDir string - mutex sync.Mutex - wg sync.WaitGroup - grpc *grpc.Server - rhandler RegistrationHandler - chandler ClientHandler - clients map[string]Client - - // isStarted indicates whether the service has started successfully. - isStarted bool -} - -// NewServer returns an initialized device plugin registration server. -func NewServer(socketPath string, rh RegistrationHandler, ch ClientHandler) (Server, error) { - if socketPath == "" || !filepath.IsAbs(socketPath) { - return nil, fmt.Errorf(errBadSocket+" %s", socketPath) - } - - dir, name := filepath.Split(socketPath) - - klog.V(2).InfoS("Creating device plugin registration server", "version", api.Version, "socket", socketPath) - s := &server{ - socketName: name, - socketDir: dir, - rhandler: rh, - chandler: ch, - clients: make(map[string]Client), - } - - return s, nil -} - -func (s *server) Start() error { - klog.V(2).InfoS("Starting device plugin registration server") - - if err := os.MkdirAll(s.socketDir, 0750); err != nil { - klog.ErrorS(err, "Failed to create the device plugin socket directory", "directory", s.socketDir) - return err - } - - if selinux.GetEnabled() { - if err := selinux.SetFileLabel(s.socketDir, config.KubeletPluginsDirSELinuxLabel); err != nil { - klog.ErrorS(err, "Unprivileged containerized plugins might not work. Could not set selinux context on socket dir", "path", s.socketDir) - } - } - - // For now, we leave cleanup of the *entire* directory up to the Handler - // (even though we should in theory be able to just wipe the whole directory) - // because the Handler stores its checkpoint file (amongst others) in here. - if err := s.rhandler.CleanupPluginDirectory(s.socketDir); err != nil { - klog.ErrorS(err, "Failed to cleanup the device plugin directory", "directory", s.socketDir) - return err - } - - ln, err := net.Listen("unix", s.SocketPath()) - if err != nil { - klog.ErrorS(err, "Failed to listen to socket while starting device plugin registry") - return err - } - - s.wg.Add(1) - s.grpc = grpc.NewServer([]grpc.ServerOption{}...) 
- - api.RegisterRegistrationServer(s.grpc, s) - go func() { - defer s.wg.Done() - s.setHealthy() - if err = s.grpc.Serve(ln); err != nil { - s.setUnhealthy() - klog.ErrorS(err, "Error while serving device plugin registration grpc server") - } - }() - - return nil -} - -func (s *server) Stop() error { - s.visitClients(func(r string, c Client) { - if err := s.disconnectClient(r, c); err != nil { - klog.ErrorS(err, "Failed to disconnect device plugin client", "resourceName", r) - } - }) - - s.mutex.Lock() - defer s.mutex.Unlock() - - if s.grpc == nil { - return nil - } - - s.grpc.Stop() - s.wg.Wait() - s.grpc = nil - // During kubelet termination, we do not need the registration server, - // and we consider the kubelet to be healthy even when it is down. - s.setHealthy() - klog.V(2).InfoS("Stopping device plugin registration server") - - return nil -} - -func (s *server) SocketPath() string { - return filepath.Join(s.socketDir, s.socketName) -} - -func (s *server) Register(ctx context.Context, r *api.RegisterRequest) (*api.Empty, error) { - klog.InfoS("Got registration request from device plugin with resource", "resourceName", r.ResourceName) - metrics.DevicePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc() - - if !s.isVersionCompatibleWithPlugin(r.Version) { - err := fmt.Errorf(errUnsupportedVersion, r.Version, api.SupportedVersions) - klog.ErrorS(err, "Bad registration request from device plugin with resource", "resourceName", r.ResourceName) - return &api.Empty{}, err - } - - if !v1helper.IsExtendedResourceName(core.ResourceName(r.ResourceName)) { - err := fmt.Errorf(errInvalidResourceName, r.ResourceName) - klog.ErrorS(err, "Bad registration request from device plugin") - return &api.Empty{}, err - } - - if err := s.connectClient(r.ResourceName, filepath.Join(s.socketDir, r.Endpoint)); err != nil { - klog.ErrorS(err, "Error connecting to device plugin client") - return &api.Empty{}, err - } - - return &api.Empty{}, nil -} - -func (s *server) isVersionCompatibleWithPlugin(versions ...string) bool { - // TODO(vikasc): Currently this is fine as we only have a single supported version. When we do need to support - // multiple versions in the future, we may need to extend this function to return a supported version. - // E.g., say kubelet supports v1beta1 and v1beta2, and we get v1alpha1 and v1beta1 from a device plugin, - // this function should return v1beta1 - for _, version := range versions { - for _, supportedVersion := range api.SupportedVersions { - if version == supportedVersion { - return true - } - } - } - return false -} - -func (s *server) visitClients(visit func(r string, c Client)) { - s.mutex.Lock() - for r, c := range s.clients { - s.mutex.Unlock() - visit(r, c) - s.mutex.Lock() - } - s.mutex.Unlock() -} - -func (s *server) Name() string { - return "device-plugin" -} - -func (s *server) Check(_ *http.Request) error { - if s.isStarted { - return nil - } - return fmt.Errorf("device plugin registration gRPC server failed and no device plugins can register") -} - -// setHealthy sets the health status of the gRPC server. -func (s *server) setHealthy() { - s.isStarted = true -} - -// setUnhealthy sets the health status of the gRPC server to unhealthy. 
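Editor's note: visitClients above unlocks around each callback while still ranging over the live map. A common alternative, sketched below with generic stand-in types (not the kubelet's code), snapshots the map under the lock and iterates the copy; callbacks may then safely re-take the lock, for example to deregister the client they were handed.

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.Mutex
	clients map[string]string // name -> endpoint; stand-in for map[string]Client
}

// visit copies the map under the lock and invokes fn without holding it.
func (r *registry) visit(fn func(name, endpoint string)) {
	r.mu.Lock()
	snapshot := make(map[string]string, len(r.clients))
	for k, v := range r.clients {
		snapshot[k] = v
	}
	r.mu.Unlock()

	for k, v := range snapshot {
		fn(k, v)
	}
}

func main() {
	r := &registry{clients: map[string]string{"vendor.com/gpu": "/run/gpu.sock"}}
	r.visit(func(name, ep string) { fmt.Println("disconnecting", name, "at", ep) })
}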
-func (s *server) setUnhealthy() { - s.isStarted = false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go deleted file mode 100644 index 9550d35e2d6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go +++ /dev/null @@ -1,388 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "context" - "net" - "os" - "path/filepath" - "sync" - "time" - - "github.com/fsnotify/fsnotify" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" - pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" - watcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" -) - -// Stub implementation for DevicePlugin. -type Stub struct { - devs []*pluginapi.Device - socket string - resourceName string - preStartContainerFlag bool - getPreferredAllocationFlag bool - - stop chan interface{} - wg sync.WaitGroup - update chan []*pluginapi.Device - - server *grpc.Server - - // allocFunc is used for handling allocation request - allocFunc stubAllocFunc - - // getPreferredAllocFunc is used for handling getPreferredAllocation request - getPreferredAllocFunc stubGetPreferredAllocFunc - - // registerControlFunc is used for controlling auto-registration of requests - registerControlFunc stubRegisterControlFunc - - registrationStatus chan watcherapi.RegistrationStatus // for testing - endpoint string // for testing - - kubeletRestartWatcher *fsnotify.Watcher -} - -// stubGetPreferredAllocFunc is the function called when a getPreferredAllocation request is received from Kubelet -type stubGetPreferredAllocFunc func(r *pluginapi.PreferredAllocationRequest, devs map[string]pluginapi.Device) (*pluginapi.PreferredAllocationResponse, error) - -func defaultGetPreferredAllocFunc(r *pluginapi.PreferredAllocationRequest, devs map[string]pluginapi.Device) (*pluginapi.PreferredAllocationResponse, error) { - var response pluginapi.PreferredAllocationResponse - - return &response, nil -} - -// stubAllocFunc is the function called when an allocation request is received from Kubelet -type stubAllocFunc func(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) - -func defaultAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) { - var response pluginapi.AllocateResponse - - return &response, nil -} - -// stubRegisterControlFunc is the function called when a registration request is received from Kubelet -type stubRegisterControlFunc func() bool - -func defaultRegisterControlFunc() bool { - return true -} - -// NewDevicePluginStub returns an initialized DevicePlugin Stub. 
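Editor's note: a hedged sketch of driving this Stub the way an e2e test might — construct it with fake devices, Start the gRPC server, Register with the kubelet, and push a device-health update. The pluginapi constants are the real v1beta1 ones; the socket name, resource name, and device IDs are illustrative, and the final Update assumes a kubelet is consuming the ListAndWatch stream.

package v1beta1

import (
	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
)

// exampleStubUsage drives the stub end to end; error handling is reduced to
// early returns for brevity.
func exampleStubUsage() error {
	devs := []*pluginapi.Device{
		{ID: "dev-0", Health: pluginapi.Healthy},
		{ID: "dev-1", Health: pluginapi.Healthy},
	}
	p := NewDevicePluginStub(devs, pluginapi.DevicePluginPath+"stub.sock", "vendor.com/gpu", false, false)
	if err := p.Start(); err != nil { // serves the DevicePlugin and registration gRPC APIs
		return err
	}
	defer p.Stop()

	// Announce the plugin to the kubelet's registration service.
	if err := p.Register(pluginapi.KubeletSocket, "vendor.com/gpu", pluginapi.DevicePluginPath); err != nil {
		return err
	}

	// Flip a device to unhealthy; ListAndWatch streams the change to the kubelet.
	// Note Update blocks until an active ListAndWatch stream consumes it.
	devs[1].Health = pluginapi.Unhealthy
	p.Update(devs)
	return nil
}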
-func NewDevicePluginStub(devs []*pluginapi.Device, socket string, name string, preStartContainerFlag bool, getPreferredAllocationFlag bool) *Stub { - - watcher, err := fsnotify.NewWatcher() - if err != nil { - klog.ErrorS(err, "Watcher creation failed") - panic(err) - } - - return &Stub{ - devs: devs, - socket: socket, - resourceName: name, - preStartContainerFlag: preStartContainerFlag, - getPreferredAllocationFlag: getPreferredAllocationFlag, - registerControlFunc: defaultRegisterControlFunc, - - stop: make(chan interface{}), - update: make(chan []*pluginapi.Device), - - allocFunc: defaultAllocFunc, - getPreferredAllocFunc: defaultGetPreferredAllocFunc, - kubeletRestartWatcher: watcher, - } -} - -// SetGetPreferredAllocFunc sets allocFunc of the device plugin -func (m *Stub) SetGetPreferredAllocFunc(f stubGetPreferredAllocFunc) { - m.getPreferredAllocFunc = f -} - -// SetAllocFunc sets allocFunc of the device plugin -func (m *Stub) SetAllocFunc(f stubAllocFunc) { - m.allocFunc = f -} - -// SetRegisterControlFunc sets RegisterControlFunc of the device plugin -func (m *Stub) SetRegisterControlFunc(f stubRegisterControlFunc) { - m.registerControlFunc = f -} - -// Start starts the gRPC server of the device plugin. Can only -// be called once. -func (m *Stub) Start() error { - klog.InfoS("Starting device plugin server") - err := m.cleanup() - if err != nil { - return err - } - - sock, err := net.Listen("unix", m.socket) - if err != nil { - return err - } - - m.wg.Add(1) - m.server = grpc.NewServer([]grpc.ServerOption{}...) - pluginapi.RegisterDevicePluginServer(m.server, m) - watcherapi.RegisterRegistrationServer(m.server, m) - - err = m.kubeletRestartWatcher.Add(filepath.Dir(m.socket)) - if err != nil { - klog.ErrorS(err, "Failed to add watch", "devicePluginPath", pluginapi.DevicePluginPath) - return err - } - - go func() { - defer m.wg.Done() - if err = m.server.Serve(sock); err != nil { - klog.ErrorS(err, "Error while serving device plugin registration grpc server") - } - }() - - var lastDialErr error - wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { - var conn *grpc.ClientConn - _, conn, lastDialErr = dial(m.socket) - if lastDialErr != nil { - return false, nil - } - conn.Close() - return true, nil - }) - if lastDialErr != nil { - return lastDialErr - } - - klog.InfoS("Starting to serve on socket", "socket", m.socket) - return nil -} - -func (m *Stub) Restart() error { - klog.InfoS("Restarting Device Plugin server") - if m.server == nil { - return nil - } - - m.server.Stop() - m.server = nil - - return m.Start() -} - -// Stop stops the gRPC server. Can be called without a prior Start -// and more than once. Not safe to be called concurrently by different -// goroutines! -func (m *Stub) Stop() error { - klog.InfoS("Stopping device plugin server") - if m.server == nil { - return nil - } - - m.kubeletRestartWatcher.Close() - - m.server.Stop() - m.wg.Wait() - m.server = nil - close(m.stop) // This prevents re-starting the server. - - return m.cleanup() -} - -func (m *Stub) Watch(kubeletEndpoint, resourceName, pluginSockDir string) { - for { - select { - // Detect a kubelet restart by watching for a newly created - // 'pluginapi.KubeletSocket' file. 
When this occurs, restart - // the device plugin server - case event := <-m.kubeletRestartWatcher.Events: - if event.Name == kubeletEndpoint && event.Op&fsnotify.Create == fsnotify.Create { - klog.InfoS("inotify: file created, restarting", "kubeletEndpoint", kubeletEndpoint) - var lastErr error - - err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 2*time.Minute, false, func(context.Context) (done bool, err error) { - restartErr := m.Restart() - if restartErr == nil { - return true, nil - } - klog.ErrorS(restartErr, "Retrying after error") - lastErr = restartErr - return false, nil - }) - if err != nil { - klog.ErrorS(err, "Unable to restart server: wait timed out", "lastErr", lastErr.Error()) - panic(err) - } - - if ok := m.registerControlFunc(); ok { - if err := m.Register(kubeletEndpoint, resourceName, pluginSockDir); err != nil { - klog.ErrorS(err, "Unable to register to kubelet") - panic(err) - } - } - } - - // Watch for any other fs errors and log them. - case err := <-m.kubeletRestartWatcher.Errors: - klog.ErrorS(err, "inotify error") - } - } -} - -// GetInfo is the RPC which return pluginInfo -func (m *Stub) GetInfo(ctx context.Context, req *watcherapi.InfoRequest) (*watcherapi.PluginInfo, error) { - klog.InfoS("GetInfo") - return &watcherapi.PluginInfo{ - Type: watcherapi.DevicePlugin, - Name: m.resourceName, - Endpoint: m.endpoint, - SupportedVersions: []string{pluginapi.Version}}, nil -} - -// NotifyRegistrationStatus receives the registration notification from watcher -func (m *Stub) NotifyRegistrationStatus(ctx context.Context, status *watcherapi.RegistrationStatus) (*watcherapi.RegistrationStatusResponse, error) { - if m.registrationStatus != nil { - m.registrationStatus <- *status - } - if !status.PluginRegistered { - klog.InfoS("Registration failed", "err", status.Error) - } - return &watcherapi.RegistrationStatusResponse{}, nil -} - -// Register registers the device plugin for the given resourceName with Kubelet. -func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir string) error { - klog.InfoS("Register", "kubeletEndpoint", kubeletEndpoint, "resourceName", resourceName, "socket", pluginSockDir) - - if pluginSockDir != "" { - if _, err := os.Stat(pluginSockDir + "DEPRECATION"); err == nil { - klog.InfoS("Deprecation file found. Skip registration") - return nil - } - } - klog.InfoS("Deprecation file not found. 
Invoke registration") - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - conn, err := grpc.DialContext(ctx, kubeletEndpoint, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, "unix", addr) - })) - if err != nil { - return err - } - defer conn.Close() - client := pluginapi.NewRegistrationClient(conn) - reqt := &pluginapi.RegisterRequest{ - Version: pluginapi.Version, - Endpoint: filepath.Base(m.socket), - ResourceName: resourceName, - Options: &pluginapi.DevicePluginOptions{ - PreStartRequired: m.preStartContainerFlag, - GetPreferredAllocationAvailable: m.getPreferredAllocationFlag, - }, - } - - _, err = client.Register(context.Background(), reqt) - if err != nil { - // Stop server - m.server.Stop() - klog.ErrorS(err, "Client unable to register to kubelet") - return err - } - klog.InfoS("Device Plugin registered with the Kubelet") - return err -} - -// GetDevicePluginOptions returns DevicePluginOptions settings for the device plugin. -func (m *Stub) GetDevicePluginOptions(ctx context.Context, e *pluginapi.Empty) (*pluginapi.DevicePluginOptions, error) { - options := &pluginapi.DevicePluginOptions{ - PreStartRequired: m.preStartContainerFlag, - GetPreferredAllocationAvailable: m.getPreferredAllocationFlag, - } - return options, nil -} - -// PreStartContainer resets the devices received -func (m *Stub) PreStartContainer(ctx context.Context, r *pluginapi.PreStartContainerRequest) (*pluginapi.PreStartContainerResponse, error) { - klog.InfoS("PreStartContainer", "request", r) - return &pluginapi.PreStartContainerResponse{}, nil -} - -// ListAndWatch lists devices and update that list according to the Update call -func (m *Stub) ListAndWatch(e *pluginapi.Empty, s pluginapi.DevicePlugin_ListAndWatchServer) error { - klog.InfoS("ListAndWatch") - - s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs}) - - for { - select { - case <-m.stop: - return nil - case updated := <-m.update: - s.Send(&pluginapi.ListAndWatchResponse{Devices: updated}) - } - } -} - -// Update allows the device plugin to send new devices through ListAndWatch -func (m *Stub) Update(devs []*pluginapi.Device) { - m.update <- devs -} - -// GetPreferredAllocation gets the preferred allocation from a set of available devices -func (m *Stub) GetPreferredAllocation(ctx context.Context, r *pluginapi.PreferredAllocationRequest) (*pluginapi.PreferredAllocationResponse, error) { - klog.InfoS("GetPreferredAllocation", "request", r) - - devs := make(map[string]pluginapi.Device) - - for _, dev := range m.devs { - devs[dev.ID] = *dev - } - - return m.getPreferredAllocFunc(r, devs) -} - -// Allocate does a mock allocation -func (m *Stub) Allocate(ctx context.Context, r *pluginapi.AllocateRequest) (*pluginapi.AllocateResponse, error) { - klog.InfoS("Allocate", "request", r) - - devs := make(map[string]pluginapi.Device) - - for _, dev := range m.devs { - devs[dev.ID] = *dev - } - - return m.allocFunc(r, devs) -} - -func (m *Stub) cleanup() error { - if err := os.Remove(m.socket); err != nil && !os.IsNotExist(err) { - return err - } - - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go deleted file mode 100644 index 806ab3c616d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go +++ 
/dev/null @@ -1,454 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package devicemanager - -import ( - "maps" - "sync" - - "k8s.io/klog/v2" - - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" - kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" -) - -type deviceAllocateInfo struct { - // deviceIds contains device Ids allocated to this container for the given resourceName. - deviceIds checkpoint.DevicesPerNUMA - // allocResp contains cached rpc AllocateResponse. - allocResp *pluginapi.ContainerAllocateResponse -} - -type resourceAllocateInfo map[string]deviceAllocateInfo // Keyed by resourceName. -type containerDevices map[string]resourceAllocateInfo // Keyed by containerName. -type podDevices struct { - sync.RWMutex - devs map[string]containerDevices // Keyed by podUID. -} - -// NewPodDevices is a function that returns object of podDevices type with its own guard -// RWMutex and a map where key is a pod UID and value contains -// container devices information of type containerDevices. -func newPodDevices() *podDevices { - return &podDevices{devs: make(map[string]containerDevices)} -} - -func (pdev *podDevices) pods() sets.Set[string] { - pdev.RLock() - defer pdev.RUnlock() - ret := sets.New[string]() - for k := range pdev.devs { - ret.Insert(k) - } - return ret -} - -func (pdev *podDevices) size() int { - pdev.RLock() - defer pdev.RUnlock() - return len(pdev.devs) -} - -func (pdev *podDevices) hasPod(podUID string) bool { - pdev.RLock() - defer pdev.RUnlock() - _, podExists := pdev.devs[podUID] - return podExists -} - -func (pdev *podDevices) insert(podUID, contName, resource string, devices checkpoint.DevicesPerNUMA, resp *pluginapi.ContainerAllocateResponse) { - pdev.Lock() - defer pdev.Unlock() - if _, podExists := pdev.devs[podUID]; !podExists { - pdev.devs[podUID] = make(containerDevices) - } - if _, contExists := pdev.devs[podUID][contName]; !contExists { - pdev.devs[podUID][contName] = make(resourceAllocateInfo) - } - pdev.devs[podUID][contName][resource] = deviceAllocateInfo{ - deviceIds: devices, - allocResp: resp, - } -} - -func (pdev *podDevices) delete(pods []string) { - pdev.Lock() - defer pdev.Unlock() - for _, uid := range pods { - delete(pdev.devs, uid) - } -} - -// Returns list of device Ids allocated to the given pod for the given resource. -// Returns nil if we don't have cached state for the given . -func (pdev *podDevices) podDevices(podUID, resource string) sets.Set[string] { - pdev.RLock() - defer pdev.RUnlock() - - ret := sets.New[string]() - for contName := range pdev.devs[podUID] { - ret = ret.Union(pdev.containerDevices(podUID, contName, resource)) - } - return ret -} - -// Returns list of device Ids allocated to the given container for the given resource. -// Returns nil if we don't have cached state for the given . 
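Editor's note: podDevices is a three-level map (podUID → container → resource) guarded by an RWMutex. A stripped-down sketch of the same insert/lookup shape, with plain device-ID slices standing in for the cached allocation responses:

package main

import (
	"fmt"
	"sync"
)

type podDevices struct {
	sync.RWMutex
	devs map[string]map[string]map[string][]string // podUID -> container -> resource -> device IDs
}

func (p *podDevices) insert(podUID, container, resource string, ids []string) {
	p.Lock()
	defer p.Unlock()
	if p.devs[podUID] == nil {
		p.devs[podUID] = map[string]map[string][]string{}
	}
	if p.devs[podUID][container] == nil {
		p.devs[podUID][container] = map[string][]string{}
	}
	p.devs[podUID][container][resource] = ids
}

// lookup returns nil when any level of the key is missing, mirroring the
// containerDevices accessor in this file: nil means "no cached state",
// not "empty allocation".
func (p *podDevices) lookup(podUID, container, resource string) []string {
	p.RLock()
	defer p.RUnlock()
	return p.devs[podUID][container][resource] // indexing nil maps yields the zero value
}

func main() {
	p := &podDevices{devs: map[string]map[string]map[string][]string{}}
	p.insert("uid-1", "c0", "vendor.com/gpu", []string{"dev-0"})
	fmt.Println(p.lookup("uid-1", "c0", "vendor.com/gpu"))        // [dev-0]
	fmt.Println(p.lookup("uid-2", "c0", "vendor.com/gpu") == nil) // true
}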
-func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.Set[string] { - pdev.RLock() - defer pdev.RUnlock() - if _, podExists := pdev.devs[podUID]; !podExists { - return nil - } - if _, contExists := pdev.devs[podUID][contName]; !contExists { - return nil - } - devs, resourceExists := pdev.devs[podUID][contName][resource] - if !resourceExists { - return nil - } - return devs.deviceIds.Devices() -} - -// Populates allocatedResources with the device resources allocated to the specified . -func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) { - pdev.RLock() - defer pdev.RUnlock() - containers, exists := pdev.devs[podUID] - if !exists { - return - } - resources, exists := containers[contName] - if !exists { - return - } - for resource, devices := range resources { - allocatedResources[resource] = allocatedResources[resource].Union(devices.deviceIds.Devices()) - } -} - -// Removes the device resources allocated to the specified from allocatedResources. -func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) { - pdev.RLock() - defer pdev.RUnlock() - containers, exists := pdev.devs[podUID] - if !exists { - return - } - resources, exists := containers[contName] - if !exists { - return - } - for resource, devices := range resources { - allocatedResources[resource] = allocatedResources[resource].Difference(devices.deviceIds.Devices()) - } -} - -// Returns all devices allocated to the pods being tracked, keyed by resourceName. -func (pdev *podDevices) devices() map[string]sets.Set[string] { - ret := make(map[string]sets.Set[string]) - pdev.RLock() - defer pdev.RUnlock() - for _, containerDevices := range pdev.devs { - for _, resources := range containerDevices { - for resource, devices := range resources { - if _, exists := ret[resource]; !exists { - ret[resource] = sets.New[string]() - } - if devices.allocResp != nil { - ret[resource] = ret[resource].Union(devices.deviceIds.Devices()) - } - } - } - } - return ret -} - -// Returns podUID and containerName for a device -func (pdev *podDevices) getPodAndContainerForDevice(deviceID string) (string, string) { - pdev.RLock() - defer pdev.RUnlock() - for podUID, containerDevices := range pdev.devs { - for containerName, resources := range containerDevices { - for _, devices := range resources { - if devices.deviceIds.Devices().Has(deviceID) { - return podUID, containerName - } - } - } - } - return "", "" -} - -// Turns podDevices to checkpointData. 
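Before the checkpoint conversion below, the pod-to-container-to-resource bookkeeping that podDevices implements is worth seeing in isolation. A minimal sketch of the same nested-map-under-RWMutex pattern, with hypothetical names and none of the checkpoint or NUMA detail:

package main

import (
	"fmt"
	"sync"
)

// allocation is a stand-in for deviceAllocateInfo.
type allocation struct{ deviceIDs []string }

type podTable struct {
	sync.RWMutex
	// podUID -> containerName -> resourceName -> allocation
	devs map[string]map[string]map[string]allocation
}

func (t *podTable) insert(podUID, container, resource string, a allocation) {
	t.Lock()
	defer t.Unlock()
	if t.devs == nil {
		t.devs = map[string]map[string]map[string]allocation{}
	}
	if t.devs[podUID] == nil {
		t.devs[podUID] = map[string]map[string]allocation{}
	}
	if t.devs[podUID][container] == nil {
		t.devs[podUID][container] = map[string]allocation{}
	}
	t.devs[podUID][container][resource] = a
}

// get mirrors containerDevices: missing keys yield nil, never a panic,
// because reads on nil inner maps are safe in Go.
func (t *podTable) get(podUID, container, resource string) []string {
	t.RLock()
	defer t.RUnlock()
	a, ok := t.devs[podUID][container][resource]
	if !ok {
		return nil
	}
	return a.deviceIDs
}

func main() {
	var t podTable
	t.insert("pod-1", "ctr", "vendor.example/gpu", allocation{deviceIDs: []string{"dev-0"}})
	fmt.Println(t.get("pod-1", "ctr", "vendor.example/gpu")) // [dev-0]
	fmt.Println(t.get("pod-2", "ctr", "vendor.example/gpu")) // []
}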
-func (pdev *podDevices) toCheckpointData() []checkpoint.PodDevicesEntry { - var data []checkpoint.PodDevicesEntry - pdev.RLock() - defer pdev.RUnlock() - for podUID, containerDevices := range pdev.devs { - for conName, resources := range containerDevices { - for resource, devices := range resources { - if devices.allocResp == nil { - klog.ErrorS(nil, "Can't marshal allocResp, allocation response is missing", "podUID", podUID, "containerName", conName, "resourceName", resource) - continue - } - - allocResp, err := devices.allocResp.Marshal() - if err != nil { - klog.ErrorS(err, "Can't marshal allocResp", "podUID", podUID, "containerName", conName, "resourceName", resource) - continue - } - data = append(data, checkpoint.PodDevicesEntry{ - PodUID: podUID, - ContainerName: conName, - ResourceName: resource, - DeviceIDs: devices.deviceIds, - AllocResp: allocResp}) - } - } - } - return data -} - -// Populates podDevices from the passed in checkpointData. -func (pdev *podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) { - for _, entry := range data { - klog.V(2).InfoS("Get checkpoint entry", - "podUID", entry.PodUID, "containerName", entry.ContainerName, - "resourceName", entry.ResourceName, "deviceIDs", entry.DeviceIDs, "allocated", entry.AllocResp) - allocResp := &pluginapi.ContainerAllocateResponse{} - err := allocResp.Unmarshal(entry.AllocResp) - if err != nil { - klog.ErrorS(err, "Can't unmarshal allocResp", "podUID", entry.PodUID, "containerName", entry.ContainerName, "resourceName", entry.ResourceName) - continue - } - pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, allocResp) - } -} - -// Returns combined container runtime settings to consume the container's allocated devices. -func (pdev *podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions { - pdev.RLock() - defer pdev.RUnlock() - - containers, exists := pdev.devs[podUID] - if !exists { - return nil - } - resources, exists := containers[contName] - if !exists { - return nil - } - opts := &DeviceRunContainerOptions{} - // Maps to detect duplicate settings. - devsMap := make(map[string]string) - mountsMap := make(map[string]string) - envsMap := make(map[string]string) - annotationsMap := make(map[string]string) - // Keep track of all CDI devices requested for the container. - allCDIDevices := sets.New[string]() - // Loops through AllocationResponses of all cached device resources. - for _, devices := range resources { - resp := devices.allocResp - // Each Allocate response has the following artifacts. - // Environment variables - // Mount points - // Device files - // Container annotations - // CDI device IDs - // These artifacts are per resource per container. - // Updates RunContainerOptions.Envs. - for k, v := range resp.Envs { - if e, ok := envsMap[k]; ok { - klog.V(4).InfoS("Skip existing env", "envKey", k, "envValue", v) - if e != v { - klog.ErrorS(nil, "Environment variable has conflicting setting", "envKey", k, "expected", v, "got", e) - } - continue - } - klog.V(4).InfoS("Add env", "envKey", k, "envValue", v) - envsMap[k] = v - opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v}) - } - - // Updates RunContainerOptions.Devices. 
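toCheckpointData and fromCheckpointData above serialize each allocation response individually and skip, rather than abort on, entries that fail to marshal or unmarshal. A rough stdlib analogue of that continue-on-error round trip, using JSON in place of the protobuf Marshal the real code relies on (names hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type entry struct {
	PodUID    string
	AllocResp []byte // opaque, already-marshaled response
}

type resp struct{ Envs map[string]string }

func toEntries(responses map[string]*resp) []entry {
	var out []entry
	for pod, r := range responses {
		if r == nil {
			// Mirrors the "allocation response is missing" skip.
			fmt.Println("skipping entry with missing response:", pod)
			continue
		}
		b, err := json.Marshal(r)
		if err != nil {
			fmt.Println("skipping entry that failed to marshal:", pod, err)
			continue
		}
		out = append(out, entry{PodUID: pod, AllocResp: b})
	}
	return out
}

func fromEntries(entries []entry) map[string]*resp {
	out := map[string]*resp{}
	for _, e := range entries {
		r := &resp{}
		if err := json.Unmarshal(e.AllocResp, r); err != nil {
			fmt.Println("skipping corrupt entry:", e.PodUID, err)
			continue
		}
		out[e.PodUID] = r
	}
	return out
}

func main() {
	in := map[string]*resp{"pod-1": {Envs: map[string]string{"DEV": "0"}}, "pod-2": nil}
	back := fromEntries(toEntries(in))
	fmt.Println(back["pod-1"].Envs["DEV"]) // 0
}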
- for _, dev := range resp.Devices { - if d, ok := devsMap[dev.ContainerPath]; ok { - klog.V(4).InfoS("Skip existing device", "containerPath", dev.ContainerPath, "hostPath", dev.HostPath) - if d != dev.HostPath { - klog.ErrorS(nil, "Container device has conflicting mapping host devices", - "containerPath", dev.ContainerPath, "got", d, "expected", dev.HostPath) - } - continue - } - klog.V(4).InfoS("Add device", "containerPath", dev.ContainerPath, "hostPath", dev.HostPath) - devsMap[dev.ContainerPath] = dev.HostPath - opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{ - PathOnHost: dev.HostPath, - PathInContainer: dev.ContainerPath, - Permissions: dev.Permissions, - }) - } - - // Updates RunContainerOptions.Mounts. - for _, mount := range resp.Mounts { - if m, ok := mountsMap[mount.ContainerPath]; ok { - klog.V(4).InfoS("Skip existing mount", "containerPath", mount.ContainerPath, "hostPath", mount.HostPath) - if m != mount.HostPath { - klog.ErrorS(nil, "Container mount has conflicting mapping host mounts", - "containerPath", mount.ContainerPath, "conflictingPath", m, "hostPath", mount.HostPath) - } - continue - } - klog.V(4).InfoS("Add mount", "containerPath", mount.ContainerPath, "hostPath", mount.HostPath) - mountsMap[mount.ContainerPath] = mount.HostPath - opts.Mounts = append(opts.Mounts, kubecontainer.Mount{ - Name: mount.ContainerPath, - ContainerPath: mount.ContainerPath, - HostPath: mount.HostPath, - ReadOnly: mount.ReadOnly, - // TODO: This may need to be part of Device plugin API. - SELinuxRelabel: false, - }) - } - - // Updates for Annotations - for k, v := range resp.Annotations { - if e, ok := annotationsMap[k]; ok { - klog.V(4).InfoS("Skip existing annotation", "annotationKey", k, "annotationValue", v) - if e != v { - klog.ErrorS(nil, "Annotation has conflicting setting", "annotationKey", k, "expected", e, "got", v) - } - continue - } - klog.V(4).InfoS("Add annotation", "annotationKey", k, "annotationValue", v) - annotationsMap[k] = v - opts.Annotations = append(opts.Annotations, kubecontainer.Annotation{Name: k, Value: v}) - } - - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DevicePluginCDIDevices) { - // Updates for CDI devices. - cdiDevices := getCDIDeviceInfo(resp, allCDIDevices) - opts.CDIDevices = append(opts.CDIDevices, cdiDevices...) 
- } - } - - return opts -} - -// getCDIDeviceInfo returns CDI devices from an allocate response -func getCDIDeviceInfo(resp *pluginapi.ContainerAllocateResponse, knownCDIDevices sets.Set[string]) []kubecontainer.CDIDevice { - var cdiDevices []kubecontainer.CDIDevice - for _, cdiDevice := range resp.CDIDevices { - if knownCDIDevices.Has(cdiDevice.Name) { - klog.V(4).InfoS("Skip existing CDI Device", "name", cdiDevice.Name) - continue - } - klog.V(4).InfoS("Add CDI device", "name", cdiDevice.Name) - knownCDIDevices.Insert(cdiDevice.Name) - - device := kubecontainer.CDIDevice{ - Name: cdiDevice.Name, - } - cdiDevices = append(cdiDevices, device) - } - - return cdiDevices -} - -// getContainerDevices returns the devices assigned to the provided container for all ResourceNames -func (pdev *podDevices) getContainerDevices(podUID, contName string) ResourceDeviceInstances { - pdev.RLock() - defer pdev.RUnlock() - - if _, podExists := pdev.devs[podUID]; !podExists { - return nil - } - if _, contExists := pdev.devs[podUID][contName]; !contExists { - return nil - } - resDev := NewResourceDeviceInstances() - for resource, allocateInfo := range pdev.devs[podUID][contName] { - if len(allocateInfo.deviceIds) == 0 { - continue - } - devicePluginMap := make(map[string]pluginapi.Device) - for numaid, devlist := range allocateInfo.deviceIds { - for _, devID := range devlist { - var topology *pluginapi.TopologyInfo - if numaid != nodeWithoutTopology { - NUMANodes := []*pluginapi.NUMANode{{ID: numaid}} - if pDev, ok := devicePluginMap[devID]; ok && pDev.Topology != nil { - if nodes := pDev.Topology.GetNodes(); nodes != nil { - NUMANodes = append(NUMANodes, nodes...) - } - } - - // ID and Healthy are not relevant here. - topology = &pluginapi.TopologyInfo{Nodes: NUMANodes} - } - devicePluginMap[devID] = pluginapi.Device{ - Topology: topology, - } - } - } - resDev[resource] = devicePluginMap - } - return resDev -} - -// DeviceInstances is a mapping device name -> plugin device data -type DeviceInstances map[string]pluginapi.Device - -// ResourceDeviceInstances is a mapping resource name -> DeviceInstances -type ResourceDeviceInstances map[string]DeviceInstances - -// NewResourceDeviceInstances returns a new ResourceDeviceInstances -func NewResourceDeviceInstances() ResourceDeviceInstances { - return make(ResourceDeviceInstances) -} - -// Clone returns a clone of ResourceDeviceInstances -func (rdev ResourceDeviceInstances) Clone() ResourceDeviceInstances { - clone := NewResourceDeviceInstances() - for resourceName, resourceDevs := range rdev { - clone[resourceName] = maps.Clone(resourceDevs) - } - return clone -} - -// Filter takes a condition set expressed as map[string]sets.Set[string] and returns a new -// ResourceDeviceInstances with only the devices matching the condition set. 
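deviceRunContainerOptions above folds several per-resource allocate responses into one set of container options, keeping a map per artifact kind so duplicates are skipped and conflicts are logged rather than treated as fatal. Reduced to just the environment-variable case, the pattern looks roughly like this (log output standing in for klog):

package main

import (
	"fmt"
	"log"
)

type envVar struct{ Name, Value string }

// mergeEnvs combines env maps from multiple responses; the first writer
// wins, and a later conflicting value is reported but does not override.
func mergeEnvs(responses []map[string]string) []envVar {
	seen := map[string]string{}
	var out []envVar
	for _, resp := range responses {
		for k, v := range resp {
			if prev, ok := seen[k]; ok {
				if prev != v {
					log.Printf("env %q has conflicting values %q and %q", k, prev, v)
				}
				continue
			}
			seen[k] = v
			out = append(out, envVar{Name: k, Value: v})
		}
	}
	return out
}

func main() {
	merged := mergeEnvs([]map[string]string{
		{"VISIBLE_DEVICES": "0"},
		{"VISIBLE_DEVICES": "1", "EXTRA": "x"},
	})
	fmt.Println(merged)
}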
-func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.Set[string]) ResourceDeviceInstances { - filtered := NewResourceDeviceInstances() - for resourceName, filterIDs := range cond { - if _, exists := rdev[resourceName]; !exists { - continue - } - filtered[resourceName] = DeviceInstances{} - for instanceID, instance := range rdev[resourceName] { - if filterIDs.Has(instanceID) { - filtered[resourceName][instanceID] = instance - } - } - } - return filtered -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go deleted file mode 100644 index f4d7fa73a8a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package devicemanager - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/component-helpers/resource" - "k8s.io/klog/v2" - pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" - - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" -) - -// GetTopologyHints implements the TopologyManager HintProvider Interface which -// ensures the Device Manager is consulted when Topology Aware Hints for each -// container are created. -func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - // Garbage collect any stranded device resources before providing TopologyHints - m.UpdateAllocatedDevices() - - // Loop through all device resources and generate TopologyHints for them. - deviceHints := make(map[string][]topologymanager.TopologyHint) - accumulatedResourceRequests := m.getContainerDeviceRequest(container) - - m.mutex.Lock() - defer m.mutex.Unlock() - for resource, requested := range accumulatedResourceRequests { - // Only consider devices that actually contain topology information. - if aligned := m.deviceHasTopologyAlignment(resource); !aligned { - klog.InfoS("Resource does not have a topology preference", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested) - deviceHints[resource] = nil - continue - } - - // Short circuit to regenerate the same hints if there are already - // devices allocated to the Container. This might happen after a - // kubelet restart, for example. 
- allocated := m.podDevices.containerDevices(string(pod.UID), container.Name, resource) - if allocated.Len() > 0 { - if allocated.Len() != requested { - klog.InfoS("Resource already allocated to pod with different number than request", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested, "allocated", allocated.Len()) - deviceHints[resource] = []topologymanager.TopologyHint{} - continue - } - klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name) - deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested) - continue - } - - // Get the list of available devices, for which TopologyHints should be generated. - available := m.getAvailableDevices(resource) - reusable := m.devicesToReuse[string(pod.UID)][resource] - if available.Union(reusable).Len() < requested { - klog.InfoS("Unable to generate topology hints: requested number of devices unavailable", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested, "available", available.Union(reusable).Len()) - deviceHints[resource] = []topologymanager.TopologyHint{} - continue - } - - // Generate TopologyHints for this resource given the current - // request size and the list of available devices. - deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, reusable, requested) - } - - return deviceHints -} - -// GetPodTopologyHints implements the topologymanager.HintProvider Interface which -// ensures the Device Manager is consulted when Topology Aware Hints for Pod are created. -func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint { - // Garbage collect any stranded device resources before providing TopologyHints - m.UpdateAllocatedDevices() - - deviceHints := make(map[string][]topologymanager.TopologyHint) - accumulatedResourceRequests := m.getPodDeviceRequest(pod) - - m.mutex.Lock() - defer m.mutex.Unlock() - for resource, requested := range accumulatedResourceRequests { - // Only consider devices that actually contain topology information. - if aligned := m.deviceHasTopologyAlignment(resource); !aligned { - klog.InfoS("Resource does not have a topology preference", "resourceName", resource, "pod", klog.KObj(pod), "request", requested) - deviceHints[resource] = nil - continue - } - - // Short circuit to regenerate the same hints if there are already - // devices allocated to the Pod. This might happen after a - // kubelet restart, for example. - allocated := m.podDevices.podDevices(string(pod.UID), resource) - if allocated.Len() > 0 { - if allocated.Len() != requested { - klog.InfoS("Resource already allocated to pod with different number than request", "resourceName", resource, "pod", klog.KObj(pod), "request", requested, "allocated", allocated.Len()) - deviceHints[resource] = []topologymanager.TopologyHint{} - continue - } - klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resourceName", resource, "pod", klog.KObj(pod), "allocated", allocated.Len()) - deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested) - continue - } - - // Get the list of available devices, for which TopologyHints should be generated. 
- available := m.getAvailableDevices(resource) - if available.Len() < requested { - klog.InfoS("Unable to generate topology hints: requested number of devices unavailable", "resourceName", resource, "pod", klog.KObj(pod), "request", requested, "available", available.Len()) - deviceHints[resource] = []topologymanager.TopologyHint{} - continue - } - - // Generate TopologyHints for this resource given the current - // request size and the list of available devices. - deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.Set[string]{}, requested) - } - - return deviceHints -} - -func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool { - // If any device has Topology NUMANodes available, we assume they care about alignment. - for _, device := range m.allDevices[resource] { - if device.Topology != nil && len(device.Topology.Nodes) > 0 { - return true - } - } - return false -} - -func (m *ManagerImpl) getAvailableDevices(resource string) sets.Set[string] { - // Strip all devices in use from the list of healthy ones. - return m.healthyDevices[resource].Difference(m.allocatedDevices[resource]) -} - -func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.Set[string], reusable sets.Set[string], request int) []topologymanager.TopologyHint { - // Initialize minAffinitySize to include all NUMA Nodes - minAffinitySize := len(m.numaNodes) - - // Iterate through all combinations of NUMA Nodes and build hints from them. - hints := []topologymanager.TopologyHint{} - bitmask.IterateBitMasks(m.numaNodes, func(mask bitmask.BitMask) { - // First, update minAffinitySize for the current request size. - devicesInMask := 0 - for _, device := range m.allDevices[resource] { - if mask.AnySet(m.getNUMANodeIds(device.Topology)) { - devicesInMask++ - } - } - if devicesInMask >= request && mask.Count() < minAffinitySize { - minAffinitySize = mask.Count() - } - - // Then check to see if all the reusable devices are part of the bitmask. - numMatching := 0 - for d := range reusable { - // Skip the device if it doesn't specify any topology info. - if m.allDevices[resource][d].Topology == nil { - continue - } - // Otherwise disregard this mask if its NUMANode isn't part of it. - if !mask.AnySet(m.getNUMANodeIds(m.allDevices[resource][d].Topology)) { - return - } - numMatching++ - } - - // Finally, check to see if enough available devices remain on the - // current NUMA node combination to satisfy the device request. - for d := range available { - if mask.AnySet(m.getNUMANodeIds(m.allDevices[resource][d].Topology)) { - numMatching++ - } - } - - // If they don't, then move onto the next combination. - if numMatching < request { - return - } - - // Otherwise, create a new hint from the NUMA mask and add it to the - // list of hints. We set all hint preferences to 'false' on the first - // pass through. - hints = append(hints, topologymanager.TopologyHint{ - NUMANodeAffinity: mask, - Preferred: false, - }) - }) - - // Loop back through all hints and update the 'Preferred' field based on - // counting the number of bits sets in the affinity mask and comparing it - // to the minAffinity. Only those with an equal number of bits set will be - // considered preferred. 
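generateDeviceTopologyHints above leans on bitmask.IterateBitMasks to walk every non-empty combination of NUMA nodes; minAffinitySize ends up as the width of the narrowest mask that could still satisfy the request, and only hints of that width get marked Preferred. A stdlib-only sketch of the same enumeration over a small node set, assuming a simple device-to-NUMA-node table (all names hypothetical):

package main

import (
	"fmt"
	"math/bits"
)

// deviceNode maps device ID to NUMA node (a hypothetical inventory).
var deviceNode = map[string]int{"dev-0": 0, "dev-1": 0, "dev-2": 1}

// hints enumerates every non-empty combination of NUMA nodes, keeps the
// combinations holding enough devices, and tracks the narrowest of them.
func hints(numNodes, request int) (masks []uint, minAffinity int) {
	minAffinity = numNodes
	total := uint(1) << uint(numNodes)
	for mask := uint(1); mask < total; mask++ {
		devicesInMask := 0
		for _, node := range deviceNode {
			if mask&(uint(1)<<uint(node)) != 0 {
				devicesInMask++
			}
		}
		if devicesInMask < request {
			continue // not enough devices on this node combination
		}
		if bits.OnesCount(mask) < minAffinity {
			minAffinity = bits.OnesCount(mask)
		}
		masks = append(masks, mask)
	}
	return masks, minAffinity
}

func main() {
	masks, min := hints(2, 2)
	for _, m := range masks {
		fmt.Printf("mask %02b preferred=%v\n", m, bits.OnesCount(m) == min)
	}
	// Two devices sit on node 0, so the narrow mask 01 is preferred
	// over the wider mask 11.
}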
- for i := range hints { - if hints[i].NUMANodeAffinity.Count() == minAffinitySize { - hints[i].Preferred = true - } - } - - return hints -} - -func (m *ManagerImpl) getNUMANodeIds(topology *pluginapi.TopologyInfo) []int { - if topology == nil { - return nil - } - var ids []int - for _, n := range topology.Nodes { - ids = append(ids, int(n.ID)) - } - return ids -} - -func (m *ManagerImpl) getPodDeviceRequest(pod *v1.Pod) map[string]int { - // for these device plugin resources, requests == limits - limits := resource.PodLimits(pod, resource.PodResourcesOptions{ - ExcludeOverhead: true, - }) - podRequests := make(map[string]int) - for resourceName, quantity := range limits { - if !m.isDevicePluginResource(string(resourceName)) { - continue - } - podRequests[string(resourceName)] = int(quantity.Value()) - } - return podRequests -} - -func (m *ManagerImpl) getContainerDeviceRequest(container *v1.Container) map[string]int { - containerRequests := make(map[string]int) - for resourceObj, requestedObj := range container.Resources.Limits { - resource := string(resourceObj) - requested := int(requestedObj.Value()) - if !m.isDevicePluginResource(resource) { - continue - } - containerRequests[resource] = requested - } - return containerRequests -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go deleted file mode 100644 index bdf0b27d673..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package devicemanager - -import ( - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/server/healthz" - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/config" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" - "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// Manager manages all the Device Plugins running on a node. -type Manager interface { - // Start starts device plugin registration service. - Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error - - // Allocate configures and assigns devices to a container in a pod. From - // the requested device resources, Allocate will communicate with the - // owning device plugin to allow setup procedures to take place, and for - // the device plugin to provide runtime settings to use the device - // (environment variables, mount points and device files). - Allocate(pod *v1.Pod, container *v1.Container) error - - // UpdatePluginResources updates node resources based on devices already - // allocated to pods. 
The node object is provided for the device manager to - // update the node capacity to reflect the currently available devices. - UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error - - // Stop stops the manager. - Stop() error - - // GetDeviceRunContainerOptions checks whether we have cached containerDevices - // for the passed-in and returns its DeviceRunContainerOptions - // for the found one. An empty struct is returned in case no cached state is found. - GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*DeviceRunContainerOptions, error) - - // GetCapacity returns the amount of available device plugin resource capacity, resource allocatable - // and inactive device plugin resources previously registered on the node. - GetCapacity() (v1.ResourceList, v1.ResourceList, []string) - - // GetWatcherHandler returns the plugin handler for the device manager. - GetWatcherHandler() cache.PluginHandler - GetHealthChecker() healthz.HealthChecker - - // GetDevices returns information about the devices assigned to pods and containers - GetDevices(podUID, containerName string) ResourceDeviceInstances - - // UpdateAllocatedResourcesStatus updates the status of allocated resources for the pod. - UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) - - // GetAllocatableDevices returns information about all the devices known to the manager - GetAllocatableDevices() ResourceDeviceInstances - - // ShouldResetExtendedResourceCapacity returns whether the extended resources should be reset or not, - // depending on the checkpoint file availability. Absence of the checkpoint file strongly indicates - // the node has been recreated. - ShouldResetExtendedResourceCapacity() bool - - // TopologyManager HintProvider provider indicates the Device Manager implements the Topology Manager Interface - // and is consulted to make Topology aware resource alignments - GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint - - // TopologyManager HintProvider provider indicates the Device Manager implements the Topology Manager Interface - // and is consulted to make Topology aware resource alignments per Pod - GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint - - // UpdateAllocatedDevices frees any Devices that are bound to terminated pods. - UpdateAllocatedDevices() - - // Updates returns a channel that receives an Update when the device changed its status. - Updates() <-chan resourceupdates.Update -} - -// DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices. -type DeviceRunContainerOptions struct { - // The environment variables list. - Envs []kubecontainer.EnvVar - // The mounts for the container. - Mounts []kubecontainer.Mount - // The host devices mapped into the container. - Devices []kubecontainer.DeviceInfo - // The Annotations for the container - Annotations []kubecontainer.Annotation - // CDI Devices for the container - CDIDevices []kubecontainer.CDIDevice -} - -// TODO: evaluate whether we need this error definition. -const ( - errEndpointStopped = "endpoint %v has been stopped" -) - -// endpointStopGracePeriod indicates the grace period after an endpoint is stopped -// because its device plugin fails. DeviceManager keeps the stopped endpoint in its -// cache during this grace period to cover the time gap for the capacity change to -// take effect. 
-const endpointStopGracePeriod = time.Duration(5) * time.Minute - -// kubeletDeviceManagerCheckpoint is the file name of device plugin checkpoint -const kubeletDeviceManagerCheckpoint = "kubelet_internal_checkpoint" diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/doc.go deleted file mode 100644 index 4f007f7ce10..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cm (abbreviation of "container manager") and its subpackages contain all the kubelet code -// to manage containers. For example, they contain functions to configure containers' cgroups, -// ensure containers run with the desired QoS, and allocate compute resources like cpus, memory, -// devices... -package cm diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/OWNERS deleted file mode 100644 index 74bf9b65e65..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -labels: - - wg/device-management diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/claiminfo.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/claiminfo.go deleted file mode 100644 index b8c2215ccc5..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/claiminfo.go +++ /dev/null @@ -1,222 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dra - -import ( - "errors" - "fmt" - "slices" - "sync" - - resourceapi "k8s.io/api/resource/v1beta1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/kubelet/cm/dra/state" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" -) - -// ClaimInfo holds information required -// to prepare and unprepare a resource claim. -// +k8s:deepcopy-gen=true -type ClaimInfo struct { - state.ClaimInfoState - prepared bool -} - -// claimInfoCache is a cache of processed resource claims keyed by namespace/claimname. -type claimInfoCache struct { - sync.RWMutex - checkpointer state.Checkpointer - claimInfo map[string]*ClaimInfo -} - -// newClaimInfoFromClaim creates a new claim info from a resource claim. -// It verifies that the kubelet can handle the claim. 
-func newClaimInfoFromClaim(claim *resourceapi.ResourceClaim) (*ClaimInfo, error) { - claimInfoState := state.ClaimInfoState{ - ClaimUID: claim.UID, - ClaimName: claim.Name, - Namespace: claim.Namespace, - PodUIDs: sets.New[string](), - DriverState: make(map[string]state.DriverState), - } - if claim.Status.Allocation == nil { - return nil, errors.New("not allocated") - } - for _, result := range claim.Status.Allocation.Devices.Results { - claimInfoState.DriverState[result.Driver] = state.DriverState{} - } - info := &ClaimInfo{ - ClaimInfoState: claimInfoState, - prepared: false, - } - return info, nil -} - -// newClaimInfoFromClaim creates a new claim info from a checkpointed claim info state object. -func newClaimInfoFromState(state *state.ClaimInfoState) *ClaimInfo { - info := &ClaimInfo{ - ClaimInfoState: *state.DeepCopy(), - prepared: false, - } - return info -} - -// setCDIDevices adds a set of CDI devices to the claim info. -func (info *ClaimInfo) addDevice(driverName string, deviceState state.Device) { - if info.DriverState == nil { - info.DriverState = make(map[string]state.DriverState) - } - driverState := info.DriverState[driverName] - driverState.Devices = append(driverState.Devices, deviceState) - info.DriverState[driverName] = driverState -} - -// addPodReference adds a pod reference to the claim info. -func (info *ClaimInfo) addPodReference(podUID types.UID) { - info.PodUIDs.Insert(string(podUID)) -} - -// hasPodReference checks if a pod reference exists in the claim info. -func (info *ClaimInfo) hasPodReference(podUID types.UID) bool { - return info.PodUIDs.Has(string(podUID)) -} - -// deletePodReference deletes a pod reference from the claim info. -func (info *ClaimInfo) deletePodReference(podUID types.UID) { - info.PodUIDs.Delete(string(podUID)) -} - -// setPrepared marks the claim info as prepared. -func (info *ClaimInfo) setPrepared() { - info.prepared = true -} - -// isPrepared checks if claim info is prepared or not. -func (info *ClaimInfo) isPrepared() bool { - return info.prepared -} - -// newClaimInfoCache creates a new claim info cache object, pre-populated from a checkpoint (if present). -func newClaimInfoCache(stateDir, checkpointName string) (*claimInfoCache, error) { - checkpointer, err := state.NewCheckpointer(stateDir, checkpointName) - if err != nil { - return nil, fmt.Errorf("could not initialize checkpoint manager, please drain node and remove dra state file, err: %w", err) - } - - checkpoint, err := checkpointer.GetOrCreate() - if err != nil { - return nil, fmt.Errorf("error calling GetOrCreate() on checkpoint state: %w", err) - } - - cache := &claimInfoCache{ - checkpointer: checkpointer, - claimInfo: make(map[string]*ClaimInfo), - } - - entries, err := checkpoint.GetClaimInfoStateList() - if err != nil { - return nil, fmt.Errorf("error calling GetEntries() on checkpoint: %w", err) - - } - for _, entry := range entries { - info := newClaimInfoFromState(&entry) - cache.claimInfo[info.Namespace+"/"+info.ClaimName] = info - } - - return cache, nil -} - -// withLock runs a function while holding the claimInfoCache lock. -func (cache *claimInfoCache) withLock(f func() error) error { - cache.Lock() - defer cache.Unlock() - return f() -} - -// withRLock runs a function while holding the claimInfoCache rlock. -func (cache *claimInfoCache) withRLock(f func() error) error { - cache.RLock() - defer cache.RUnlock() - return f() -} - -// add adds a new claim info object into the claim info cache. 
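newClaimInfoCache above is pre-populated from a checkpoint so prepared claims survive a kubelet restart; the checkpointer itself is kubelet checkpointmanager machinery. A toy file-backed equivalent of the GetOrCreate-then-load flow, using JSON and hypothetical names:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

type claimState struct{ Namespace, ClaimName string }

type cache struct {
	path   string
	claims map[string]claimState // keyed namespace/claimName
}

// newCache loads an existing checkpoint file or starts empty (GetOrCreate).
func newCache(dir, name string) (*cache, error) {
	c := &cache{path: filepath.Join(dir, name), claims: map[string]claimState{}}
	b, err := os.ReadFile(c.path)
	if errors.Is(err, os.ErrNotExist) {
		return c, nil
	}
	if err != nil {
		return nil, err
	}
	var entries []claimState
	if err := json.Unmarshal(b, &entries); err != nil {
		return nil, fmt.Errorf("corrupt checkpoint %s: %w", c.path, err)
	}
	for _, e := range entries {
		c.claims[e.Namespace+"/"+e.ClaimName] = e
	}
	return c, nil
}

// sync writes the full cache state back out, like syncToCheckpoint.
func (c *cache) sync() error {
	entries := make([]claimState, 0, len(c.claims))
	for _, e := range c.claims {
		entries = append(entries, e)
	}
	b, err := json.Marshal(entries)
	if err != nil {
		return err
	}
	return os.WriteFile(c.path, b, 0o600)
}

func main() {
	c, err := newCache(os.TempDir(), "dra_demo_state")
	if err != nil {
		panic(err)
	}
	c.claims["default/claim-1"] = claimState{"default", "claim-1"}
	fmt.Println(c.sync())
}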
-func (cache *claimInfoCache) add(info *ClaimInfo) *ClaimInfo { - cache.claimInfo[info.Namespace+"/"+info.ClaimName] = info - return info -} - -// contains checks to see if a specific claim info object is already in the cache. -func (cache *claimInfoCache) contains(claimName, namespace string) bool { - _, exists := cache.claimInfo[namespace+"/"+claimName] - return exists -} - -// get gets a specific claim info object from the cache. -func (cache *claimInfoCache) get(claimName, namespace string) (*ClaimInfo, bool) { - info, exists := cache.claimInfo[namespace+"/"+claimName] - return info, exists -} - -// delete deletes a specific claim info object from the cache. -func (cache *claimInfoCache) delete(claimName, namespace string) { - delete(cache.claimInfo, namespace+"/"+claimName) -} - -// hasPodReference checks if there is at least one claim -// that is referenced by the pod with the given UID -// This function is used indirectly by the status manager -// to check if pod can enter termination status -func (cache *claimInfoCache) hasPodReference(uid types.UID) bool { - for _, claimInfo := range cache.claimInfo { - if claimInfo.hasPodReference(uid) { - return true - } - } - return false -} - -// syncToCheckpoint syncs the full claim info cache state to a checkpoint. -func (cache *claimInfoCache) syncToCheckpoint() error { - claimInfoStateList := make(state.ClaimInfoStateList, 0, len(cache.claimInfo)) - for _, infoClaim := range cache.claimInfo { - claimInfoStateList = append(claimInfoStateList, infoClaim.ClaimInfoState) - } - checkpoint, err := state.NewCheckpoint(claimInfoStateList) - if err != nil { - return err - } - return cache.checkpointer.Store(checkpoint) -} - -// cdiDevicesAsList returns a list of CDIDevices from the provided claim info. -// When the request name is non-empty, only devices relevant for that request -// are returned. -func (info *ClaimInfo) cdiDevicesAsList(requestName string) []kubecontainer.CDIDevice { - var cdiDevices []kubecontainer.CDIDevice - for _, driverData := range info.DriverState { - for _, device := range driverData.Devices { - if requestName == "" || len(device.RequestNames) == 0 || slices.Contains(device.RequestNames, requestName) { - for _, cdiDeviceID := range device.CDIDeviceIDs { - cdiDevices = append(cdiDevices, kubecontainer.CDIDevice{Name: cdiDeviceID}) - } - } - } - } - return cdiDevices -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go deleted file mode 100644 index 26043a0fd28..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go +++ /dev/null @@ -1,566 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dra - -import ( - "context" - "fmt" - "strconv" - "time" - - v1 "k8s.io/api/core/v1" - resourceapi "k8s.io/api/resource/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/dynamic-resource-allocation/resourceclaim" - "k8s.io/klog/v2" - drapb "k8s.io/kubelet/pkg/apis/dra/v1beta1" - dra "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin" - "k8s.io/kubernetes/pkg/kubelet/cm/dra/state" - "k8s.io/kubernetes/pkg/kubelet/config" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" -) - -// draManagerStateFileName is the file name where dra manager stores its state -const draManagerStateFileName = "dra_manager_state" - -// defaultReconcilePeriod is the default reconciliation period to keep all claim info state in sync. -const defaultReconcilePeriod = 60 * time.Second - -// ActivePodsFunc is a function that returns a list of pods to reconcile. -type ActivePodsFunc func() []*v1.Pod - -// GetNodeFunc is a function that returns the node object using the kubelet's node lister. -type GetNodeFunc func() (*v1.Node, error) - -// ManagerImpl is the structure in charge of managing DRA drivers. -type ManagerImpl struct { - // cache contains cached claim info - cache *claimInfoCache - - // reconcilePeriod is the duration between calls to reconcileLoop. - reconcilePeriod time.Duration - - // activePods is a method for listing active pods on the node - // so all claim info state can be updated in the reconciliation loop. - activePods ActivePodsFunc - - // sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. - // We use it to determine when we can treat pods as inactive and react appropriately. - sourcesReady config.SourcesReady - - // KubeClient reference - kubeClient clientset.Interface - - // getNode is a function that returns the node object using the kubelet's node lister. - getNode GetNodeFunc -} - -// NewManagerImpl creates a new manager. -func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string, nodeName types.NodeName) (*ManagerImpl, error) { - claimInfoCache, err := newClaimInfoCache(stateFileDirectory, draManagerStateFileName) - if err != nil { - return nil, fmt.Errorf("failed to create claimInfo cache: %w", err) - } - - // TODO: for now the reconcile period is not configurable. - // We should consider making it configurable in the future. - reconcilePeriod := defaultReconcilePeriod - - manager := &ManagerImpl{ - cache: claimInfoCache, - kubeClient: kubeClient, - reconcilePeriod: reconcilePeriod, - activePods: nil, - sourcesReady: nil, - } - - return manager, nil -} - -func (m *ManagerImpl) GetWatcherHandler() cache.PluginHandler { - // The time that DRA drivers have to come back after being unregistered - // before the kubelet removes their ResourceSlices. - // - // This must be long enough to actually allow stopping a pod and - // starting the replacement (otherwise ResourceSlices get deleted - // unnecessarily) and not too long (otherwise the time window were - // pods might still get scheduled to the node after removal of a - // driver is too long). - // - // 30 seconds might be long enough for a simple container restart. - // If a DRA driver wants to be sure that slices don't get wiped, - // it should use rolling updates. 
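The wiping delay described in the comment above is a cancel-on-return timer: when a driver unregisters, removal of its ResourceSlices is scheduled, and a re-registration inside the window cancels it. The shape of that logic, sketched with time.AfterFunc and hypothetical names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type wiper struct {
	mu      sync.Mutex
	pending map[string]*time.Timer // driver name -> scheduled wipe
}

func (w *wiper) driverGone(name string, delay time.Duration, wipe func()) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.pending == nil {
		w.pending = map[string]*time.Timer{}
	}
	w.pending[name] = time.AfterFunc(delay, wipe)
}

// driverBack cancels a pending wipe if the driver re-registers in time.
func (w *wiper) driverBack(name string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if t, ok := w.pending[name]; ok {
		t.Stop()
		delete(w.pending, name)
	}
}

func main() {
	var w wiper
	w.driverGone("driver.example", 50*time.Millisecond, func() { fmt.Println("wiped") })
	w.driverBack("driver.example") // came back in time; nothing is wiped
	time.Sleep(100 * time.Millisecond)
}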
- wipingDelay := 30 * time.Second - return cache.PluginHandler(dra.NewRegistrationHandler(m.kubeClient, m.getNode, wipingDelay)) -} - -// Start starts the reconcile loop of the manager. -func (m *ManagerImpl) Start(ctx context.Context, activePods ActivePodsFunc, getNode GetNodeFunc, sourcesReady config.SourcesReady) error { - m.activePods = activePods - m.getNode = getNode - m.sourcesReady = sourcesReady - go wait.UntilWithContext(ctx, func(ctx context.Context) { m.reconcileLoop(ctx) }, m.reconcilePeriod) - return nil -} - -// reconcileLoop ensures that any stale state in the manager's claimInfoCache gets periodically reconciled. -func (m *ManagerImpl) reconcileLoop(ctx context.Context) { - logger := klog.FromContext(ctx) - // Only once all sources are ready do we attempt to reconcile. - // This ensures that the call to m.activePods() below will succeed with - // the actual active pods list. - if m.sourcesReady == nil || !m.sourcesReady.AllReady() { - return - } - - // Get the full list of active pods. - activePods := sets.New[string]() - for _, p := range m.activePods() { - activePods.Insert(string(p.UID)) - } - - // Get the list of inactive pods still referenced by any claimInfos. - type podClaims struct { - uid types.UID - namespace string - claimNames []string - } - inactivePodClaims := make(map[string]*podClaims) - m.cache.RLock() - for _, claimInfo := range m.cache.claimInfo { - for podUID := range claimInfo.PodUIDs { - if activePods.Has(podUID) { - continue - } - if inactivePodClaims[podUID] == nil { - inactivePodClaims[podUID] = &podClaims{ - uid: types.UID(podUID), - namespace: claimInfo.Namespace, - claimNames: []string{}, - } - } - inactivePodClaims[podUID].claimNames = append(inactivePodClaims[podUID].claimNames, claimInfo.ClaimName) - } - } - m.cache.RUnlock() - - // Loop through all inactive pods and call UnprepareResources on them. - for _, podClaims := range inactivePodClaims { - if err := m.unprepareResources(ctx, podClaims.uid, podClaims.namespace, podClaims.claimNames); err != nil { - logger.Info("Unpreparing pod resources in reconcile loop failed, will retry", "podUID", podClaims.uid, "err", err) - } - } -} - -// PrepareResources attempts to prepare all of the required resources -// for the input container, issue NodePrepareResources rpc requests -// for each new resource requirement, process their responses and update the cached -// containerResources on success. -func (m *ManagerImpl) PrepareResources(ctx context.Context, pod *v1.Pod) error { - startTime := time.Now() - err := m.prepareResources(ctx, pod) - metrics.DRAOperationsDuration.WithLabelValues("PrepareResources", strconv.FormatBool(err == nil)).Observe(time.Since(startTime).Seconds()) - return err -} - -func (m *ManagerImpl) prepareResources(ctx context.Context, pod *v1.Pod) error { - logger := klog.FromContext(ctx) - batches := make(map[string][]*drapb.Claim) - resourceClaims := make(map[types.UID]*resourceapi.ResourceClaim) - for i := range pod.Spec.ResourceClaims { - podClaim := &pod.Spec.ResourceClaims[i] - logger.V(3).Info("Processing resource", "pod", klog.KObj(pod), "podClaim", podClaim.Name) - claimName, mustCheckOwner, err := resourceclaim.Name(pod, podClaim) - if err != nil { - return fmt.Errorf("prepare resource claim: %w", err) - } - - if claimName == nil { - // Nothing to do. 
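Start above wires reconcileLoop into wait.UntilWithContext, which re-invokes the function on a fixed period until the context is canceled. With only the standard library, the same cadence looks roughly like this:

package main

import (
	"context"
	"fmt"
	"time"
)

func untilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		f(ctx) // wait.UntilWithContext also runs f immediately, then on each tick
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	untilWithContext(ctx, func(context.Context) { fmt.Println("reconcile") }, 100*time.Millisecond)
}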
- logger.V(5).Info("No need to prepare resources, no claim generated", "pod", klog.KObj(pod), "podClaim", podClaim.Name) - continue - } - // Query claim object from the API server - resourceClaim, err := m.kubeClient.ResourceV1beta1().ResourceClaims(pod.Namespace).Get( - ctx, - *claimName, - metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to fetch ResourceClaim %s referenced by pod %s: %w", *claimName, pod.Name, err) - } - - if mustCheckOwner { - if err = resourceclaim.IsForPod(pod, resourceClaim); err != nil { - return err - } - } - - // Check if pod is in the ReservedFor for the claim - if !resourceclaim.IsReservedForPod(pod, resourceClaim) { - return fmt.Errorf("pod %s(%s) is not allowed to use resource claim %s(%s)", - pod.Name, pod.UID, *claimName, resourceClaim.UID) - } - - // Atomically perform some operations on the claimInfo cache. - err = m.cache.withLock(func() error { - // Get a reference to the claim info for this claim from the cache. - // If there isn't one yet, then add it to the cache. - claimInfo, exists := m.cache.get(resourceClaim.Name, resourceClaim.Namespace) - if !exists { - ci, err := newClaimInfoFromClaim(resourceClaim) - if err != nil { - return fmt.Errorf("claim %s: %w", klog.KObj(resourceClaim), err) - } - claimInfo = m.cache.add(ci) - logger.V(6).Info("Created new claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo) - } else { - logger.V(6).Info("Found existing claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo) - } - - // Add a reference to the current pod in the claim info. - claimInfo.addPodReference(pod.UID) - - // Checkpoint to ensure all claims we plan to prepare are tracked. - // If something goes wrong and the newly referenced pod gets - // deleted without a successful prepare call, we will catch - // that in the reconcile loop and take the appropriate action. - if err := m.cache.syncToCheckpoint(); err != nil { - return fmt.Errorf("failed to checkpoint claimInfo state: %w", err) - } - - // If this claim is already prepared, there is no need to prepare it again. - if claimInfo.isPrepared() { - logger.V(5).Info("Resources already prepared", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim)) - return nil - } - - // This saved claim will be used to update ClaimInfo cache - // after NodePrepareResources GRPC succeeds - resourceClaims[claimInfo.ClaimUID] = resourceClaim - - // Loop through all drivers and prepare for calling NodePrepareResources. - claim := &drapb.Claim{ - Namespace: claimInfo.Namespace, - UID: string(claimInfo.ClaimUID), - Name: claimInfo.ClaimName, - } - for driverName := range claimInfo.DriverState { - batches[driverName] = append(batches[driverName], claim) - } - - return nil - }) - if err != nil { - return fmt.Errorf("locked cache operation: %w", err) - } - } - - // Call NodePrepareResources for all claims in each batch. - // If there is any error, processing gets aborted. - // We could try to continue, but that would make the code more complex. - for driverName, claims := range batches { - // Call NodePrepareResources RPC for all resource handles. 
- client, err := dra.NewDRAPluginClient(driverName) - if err != nil { - return fmt.Errorf("failed to get gRPC client for driver %s: %w", driverName, err) - } - response, err := client.NodePrepareResources(ctx, &drapb.NodePrepareResourcesRequest{Claims: claims}) - if err != nil { - // General error unrelated to any particular claim. - return fmt.Errorf("NodePrepareResources failed: %w", err) - } - for claimUID, result := range response.Claims { - reqClaim := lookupClaimRequest(claims, claimUID) - if reqClaim == nil { - return fmt.Errorf("NodePrepareResources returned result for unknown claim UID %s", claimUID) - } - if result.GetError() != "" { - return fmt.Errorf("NodePrepareResources failed for claim %s/%s: %s", reqClaim.Namespace, reqClaim.Name, result.Error) - } - - claim := resourceClaims[types.UID(claimUID)] - - // Add the prepared CDI devices to the claim info - err := m.cache.withLock(func() error { - info, exists := m.cache.get(claim.Name, claim.Namespace) - if !exists { - return fmt.Errorf("unable to get claim info for claim %s in namespace %s", claim.Name, claim.Namespace) - } - for _, device := range result.GetDevices() { - info.addDevice(driverName, state.Device{PoolName: device.PoolName, DeviceName: device.DeviceName, RequestNames: device.RequestNames, CDIDeviceIDs: device.CDIDeviceIDs}) - } - return nil - }) - if err != nil { - return fmt.Errorf("locked cache operation: %w", err) - } - } - - unfinished := len(claims) - len(response.Claims) - if unfinished != 0 { - return fmt.Errorf("NodePrepareResources left out %d claims", unfinished) - } - } - - // Atomically perform some operations on the claimInfo cache. - err := m.cache.withLock(func() error { - // Mark all pod claims as prepared. - for _, claim := range resourceClaims { - info, exists := m.cache.get(claim.Name, claim.Namespace) - if !exists { - return fmt.Errorf("unable to get claim info for claim %s in namespace %s", claim.Name, claim.Namespace) - } - info.setPrepared() - } - - // Checkpoint to ensure all prepared claims are tracked with their list - // of CDI devices attached. - if err := m.cache.syncToCheckpoint(); err != nil { - return fmt.Errorf("failed to checkpoint claimInfo state: %w", err) - } - - return nil - }) - if err != nil { - return fmt.Errorf("locked cache operation: %w", err) - } - - return nil -} - -func lookupClaimRequest(claims []*drapb.Claim, claimUID string) *drapb.Claim { - for _, claim := range claims { - if claim.UID == claimUID { - return claim - } - } - return nil -} - -// GetResources gets a ContainerInfo object from the claimInfo cache. -// This information is used by the caller to update a container config. -func (m *ManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error) { - cdiDevices := []kubecontainer.CDIDevice{} - - for i := range pod.Spec.ResourceClaims { - podClaim := &pod.Spec.ResourceClaims[i] - claimName, _, err := resourceclaim.Name(pod, podClaim) - if err != nil { - return nil, fmt.Errorf("list resource claims: %w", err) - } - // The claim name might be nil if no underlying resource claim - // was generated for the referenced claim. There are valid use - // cases when this might happen, so we simply skip it. 
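A detail worth pulling out of prepareResources above: claims are grouped into per-driver batches so each DRA driver receives a single NodePrepareResources call covering all of its claims, and any error aborts the whole operation. The batching itself reduces to a small map-building loop (types hypothetical):

package main

import "fmt"

type claim struct {
	Name    string
	Drivers []string // drivers with allocation results for this claim
}

// batch groups each claim under every driver that must prepare it,
// mirroring the batches map built in prepareResources.
func batch(claims []claim) map[string][]string {
	batches := map[string][]string{}
	for _, c := range claims {
		for _, d := range c.Drivers {
			batches[d] = append(batches[d], c.Name)
		}
	}
	return batches
}

func main() {
	batches := batch([]claim{
		{Name: "claim-a", Drivers: []string{"gpu.example"}},
		{Name: "claim-b", Drivers: []string{"gpu.example", "nic.example"}},
	})
	for driver, names := range batches {
		// One RPC per driver would be issued here.
		fmt.Println(driver, "->", names)
	}
}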
- if claimName == nil { - continue - } - for _, claim := range container.Resources.Claims { - if podClaim.Name != claim.Name { - continue - } - - err := m.cache.withRLock(func() error { - claimInfo, exists := m.cache.get(*claimName, pod.Namespace) - if !exists { - return fmt.Errorf("unable to get claim info for claim %s in namespace %s", *claimName, pod.Namespace) - } - - // As of Kubernetes 1.31, CDI device IDs are not passed via annotations anymore. - cdiDevices = append(cdiDevices, claimInfo.cdiDevicesAsList(claim.Request)...) - - return nil - }) - if err != nil { - return nil, fmt.Errorf("locked cache operation: %w", err) - } - } - } - return &ContainerInfo{CDIDevices: cdiDevices}, nil -} - -// UnprepareResources calls a driver's NodeUnprepareResource API for each resource claim owned by a pod. -// This function is idempotent and may be called multiple times against the same pod. -// As such, calls to the underlying NodeUnprepareResource API are skipped for claims that have -// already been successfully unprepared. -func (m *ManagerImpl) UnprepareResources(ctx context.Context, pod *v1.Pod) error { - var err error = nil - defer func(startTime time.Time) { - metrics.DRAOperationsDuration.WithLabelValues("UnprepareResources", strconv.FormatBool(err != nil)).Observe(time.Since(startTime).Seconds()) - }(time.Now()) - var claimNames []string - for i := range pod.Spec.ResourceClaims { - claimName, _, err := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i]) - if err != nil { - return fmt.Errorf("unprepare resource claim: %w", err) - } - // The claim name might be nil if no underlying resource claim - // was generated for the referenced claim. There are valid use - // cases when this might happen, so we simply skip it. - if claimName == nil { - continue - } - claimNames = append(claimNames, *claimName) - } - err = m.unprepareResources(ctx, pod.UID, pod.Namespace, claimNames) - return err -} - -func (m *ManagerImpl) unprepareResources(ctx context.Context, podUID types.UID, namespace string, claimNames []string) error { - logger := klog.FromContext(ctx) - batches := make(map[string][]*drapb.Claim) - claimNamesMap := make(map[types.UID]string) - for _, claimName := range claimNames { - // Atomically perform some operations on the claimInfo cache. - err := m.cache.withLock(func() error { - // Get the claim info from the cache - claimInfo, exists := m.cache.get(claimName, namespace) - - // Skip calling NodeUnprepareResource if claim info is not cached - if !exists { - return nil - } - - // Skip calling NodeUnprepareResource if other pods are still referencing it - if len(claimInfo.PodUIDs) > 1 { - // We delay checkpointing of this change until - // UnprepareResources returns successfully. It is OK to do - // this because we will only return successfully from this call - // if the checkpoint has succeeded. That means if the kubelet - // is ever restarted before this checkpoint succeeds, we will - // simply call into this (idempotent) function again. - claimInfo.deletePodReference(podUID) - return nil - } - - // This claimInfo name will be used to update ClaimInfo cache - // after NodeUnprepareResources GRPC succeeds - claimNamesMap[claimInfo.ClaimUID] = claimInfo.ClaimName - - // Loop through all drivers and prepare for calling NodeUnprepareResources. 
- claim := &drapb.Claim{ - Namespace: claimInfo.Namespace, - UID: string(claimInfo.ClaimUID), - Name: claimInfo.ClaimName, - } - for driverName := range claimInfo.DriverState { - batches[driverName] = append(batches[driverName], claim) - } - - return nil - }) - if err != nil { - return fmt.Errorf("locked cache operation: %w", err) - } - } - - // Call NodeUnprepareResources for all claims in each batch. - // If there is any error, processing gets aborted. - // We could try to continue, but that would make the code more complex. - for driverName, claims := range batches { - // Call NodeUnprepareResources RPC for all resource handles. - client, err := dra.NewDRAPluginClient(driverName) - if err != nil { - return fmt.Errorf("get gRPC client for DRA driver %s: %w", driverName, err) - } - response, err := client.NodeUnprepareResources(ctx, &drapb.NodeUnprepareResourcesRequest{Claims: claims}) - if err != nil { - // General error unrelated to any particular claim. - return fmt.Errorf("NodeUnprepareResources failed: %w", err) - } - - for claimUID, result := range response.Claims { - reqClaim := lookupClaimRequest(claims, claimUID) - if reqClaim == nil { - return fmt.Errorf("NodeUnprepareResources returned result for unknown claim UID %s", claimUID) - } - if result.GetError() != "" { - return fmt.Errorf("NodeUnprepareResources failed for claim %s/%s: %s", reqClaim.Namespace, reqClaim.Name, result.Error) - } - } - - unfinished := len(claims) - len(response.Claims) - if unfinished != 0 { - return fmt.Errorf("NodeUnprepareResources left out %d claims", unfinished) - } - } - - // Atomically perform some operations on the claimInfo cache. - err := m.cache.withLock(func() error { - // Delete all claimInfos from the cache that have just been unprepared. - for _, claimName := range claimNamesMap { - claimInfo, _ := m.cache.get(claimName, namespace) - m.cache.delete(claimName, namespace) - logger.V(6).Info("Deleted claim info cache entry", "claim", klog.KRef(namespace, claimName), "claimInfoEntry", claimInfo) - } - - // Atomically sync the cache back to the checkpoint. 
- if err := m.cache.syncToCheckpoint(); err != nil { - return fmt.Errorf("failed to checkpoint claimInfo state: %w", err) - } - return nil - }) - if err != nil { - return fmt.Errorf("locked cache operation: %w", err) - } - - return nil -} - -// PodMightNeedToUnprepareResources returns true if the pod might need to -// unprepare resources -func (m *ManagerImpl) PodMightNeedToUnprepareResources(uid types.UID) bool { - m.cache.Lock() - defer m.cache.Unlock() - return m.cache.hasPodReference(uid) -} - -// GetContainerClaimInfos gets Container's ClaimInfo -func (m *ManagerImpl) GetContainerClaimInfos(pod *v1.Pod, container *v1.Container) ([]*ClaimInfo, error) { - claimInfos := make([]*ClaimInfo, 0, len(pod.Spec.ResourceClaims)) - - for i, podResourceClaim := range pod.Spec.ResourceClaims { - claimName, _, err := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i]) - if err != nil { - return nil, fmt.Errorf("determine resource claim information: %w", err) - } - - for _, claim := range container.Resources.Claims { - if podResourceClaim.Name != claim.Name { - continue - } - - err := m.cache.withRLock(func() error { - claimInfo, exists := m.cache.get(*claimName, pod.Namespace) - if !exists { - return fmt.Errorf("unable to get claim info for claim %s in namespace %s", *claimName, pod.Namespace) - } - claimInfos = append(claimInfos, claimInfo.DeepCopy()) - return nil - }) - if err != nil { - return nil, fmt.Errorf("locked cache operation: %w", err) - } - } - } - return claimInfos, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugin.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugin.go deleted file mode 100644 index 4f7b7a99e80..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugin.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugin - -import ( - "context" - "errors" - "fmt" - "net" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" - - "k8s.io/klog/v2" - drapbv1alpha4 "k8s.io/kubelet/pkg/apis/dra/v1alpha4" - drapbv1beta1 "k8s.io/kubelet/pkg/apis/dra/v1beta1" - "k8s.io/kubernetes/pkg/kubelet/metrics" -) - -// NewDRAPluginClient returns a wrapper around those gRPC methods of a DRA -// driver kubelet plugin which need to be called by kubelet. The wrapper -// handles gRPC connection management and logging. Connections are reused -// across different NewDRAPluginClient calls. 
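GetContainerClaimInfos above returns deep copies taken while holding the cache's read lock, so callers never share mutable state with the cache. In miniature, with a plain value copy standing in for the generated DeepCopy:

package main

import (
	"fmt"
	"sync"
)

type info struct{ Name string }

type store struct {
	mu    sync.RWMutex
	items map[string]*info
}

// snapshot copies the entry out under the read lock; the caller may
// mutate the result without racing against writers to the store.
func (s *store) snapshot(key string) (info, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	p, ok := s.items[key]
	if !ok {
		return info{}, false
	}
	return *p, true // value copy, not the shared pointer
}

func main() {
	s := &store{items: map[string]*info{"default/claim-1": {Name: "claim-1"}}}
	cp, ok := s.snapshot("default/claim-1")
	cp.Name = "mutated"
	fmt.Println(ok, s.items["default/claim-1"].Name) // true claim-1
}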
-func NewDRAPluginClient(pluginName string) (*Plugin, error) { - if pluginName == "" { - return nil, fmt.Errorf("plugin name is empty") - } - - existingPlugin := draPlugins.get(pluginName) - if existingPlugin == nil { - return nil, fmt.Errorf("plugin name %s not found in the list of registered DRA plugins", pluginName) - } - - return existingPlugin, nil -} - -type Plugin struct { - name string - backgroundCtx context.Context - cancel func(cause error) - - mutex sync.Mutex - conn *grpc.ClientConn - endpoint string - chosenService string // e.g. drapbv1beta1.DRAPluginService - clientCallTimeout time.Duration -} - -func (p *Plugin) getOrCreateGRPCConn() (*grpc.ClientConn, error) { - p.mutex.Lock() - defer p.mutex.Unlock() - - if p.conn != nil { - return p.conn, nil - } - - ctx := p.backgroundCtx - logger := klog.FromContext(ctx) - - network := "unix" - logger.V(4).Info("Creating new gRPC connection", "protocol", network, "endpoint", p.endpoint) - // grpc.Dial is deprecated. grpc.NewClient should be used instead. - // For now this gets ignored because this function is meant to establish - // the connection, with the one second timeout below. Perhaps that - // approach should be reconsidered? - //nolint:staticcheck - conn, err := grpc.Dial( - p.endpoint, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithContextDialer(func(ctx context.Context, target string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, target) - }), - grpc.WithChainUnaryInterceptor(newMetricsInterceptor(p.name)), - ) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - if ok := conn.WaitForStateChange(ctx, connectivity.Connecting); !ok { - return nil, errors.New("timed out waiting for gRPC connection to be ready") - } - - p.conn = conn - return p.conn, nil -} - -func (p *Plugin) NodePrepareResources( - ctx context.Context, - req *drapbv1beta1.NodePrepareResourcesRequest, - opts ...grpc.CallOption, -) (*drapbv1beta1.NodePrepareResourcesResponse, error) { - logger := klog.FromContext(ctx) - logger.V(4).Info("Calling NodePrepareResources rpc", "request", req) - - conn, err := p.getOrCreateGRPCConn() - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, p.clientCallTimeout) - defer cancel() - - var response *drapbv1beta1.NodePrepareResourcesResponse - switch p.chosenService { - case drapbv1beta1.DRAPluginService: - nodeClient := drapbv1beta1.NewDRAPluginClient(conn) - response, err = nodeClient.NodePrepareResources(ctx, req) - case drapbv1alpha4.NodeService: - nodeClient := drapbv1alpha4.NewNodeClient(conn) - response, err = drapbv1alpha4.V1Alpha4ClientWrapper{NodeClient: nodeClient}.NodePrepareResources(ctx, req) - default: - // Shouldn't happen, validateSupportedServices should only - // return services we support here. 
- return nil, fmt.Errorf("internal error: unsupported chosen service: %q", p.chosenService) - } - logger.V(4).Info("Done calling NodePrepareResources rpc", "response", response, "err", err) - return response, err -} - -func (p *Plugin) NodeUnprepareResources( - ctx context.Context, - req *drapbv1beta1.NodeUnprepareResourcesRequest, - opts ...grpc.CallOption, -) (*drapbv1beta1.NodeUnprepareResourcesResponse, error) { - logger := klog.FromContext(ctx) - logger.V(4).Info("Calling NodeUnprepareResource rpc", "request", req) - - conn, err := p.getOrCreateGRPCConn() - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, p.clientCallTimeout) - defer cancel() - - var response *drapbv1beta1.NodeUnprepareResourcesResponse - switch p.chosenService { - case drapbv1beta1.DRAPluginService: - nodeClient := drapbv1beta1.NewDRAPluginClient(conn) - response, err = nodeClient.NodeUnprepareResources(ctx, req) - case drapbv1alpha4.NodeService: - nodeClient := drapbv1alpha4.NewNodeClient(conn) - response, err = drapbv1alpha4.V1Alpha4ClientWrapper{NodeClient: nodeClient}.NodeUnprepareResources(ctx, req) - default: - // Shouldn't happen, validateSupportedServices should only - // return services we support here. - return nil, fmt.Errorf("internal error: unsupported chosen service: %q", p.chosenService) - } - logger.V(4).Info("Done calling NodeUnprepareResources rpc", "response", response, "err", err) - return response, err -} - -func newMetricsInterceptor(pluginName string) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply any, conn *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - start := time.Now() - err := invoker(ctx, method, req, reply, conn, opts...) - metrics.DRAGRPCOperationsDuration.WithLabelValues(pluginName, method, status.Code(err).String()).Observe(time.Since(start).Seconds()) - return err - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go deleted file mode 100644 index cc2fc240b5b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugin - -import ( - "errors" - "fmt" - "slices" - "sync" -) - -// PluginsStore holds a list of DRA Plugins. -type pluginsStore struct { - sync.RWMutex - // plugin name -> Plugin in the order in which they got added - store map[string][]*Plugin -} - -// draPlugins map keeps track of all registered DRA plugins on the node -// and their corresponding sockets. -var draPlugins = &pluginsStore{} - -// Get lets you retrieve a DRA Plugin by name. -// This method is protected by a mutex. -func (s *pluginsStore) get(pluginName string) *Plugin { - s.RLock() - defer s.RUnlock() - - instances := s.store[pluginName] - if len(instances) == 0 { - return nil - } - // Heuristic: pick the most recent one. 
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go
deleted file mode 100644
index cc2fc240b5b..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package plugin
-
-import (
- "errors"
- "fmt"
- "slices"
- "sync"
-)
-
-// pluginsStore holds a list of DRA Plugins.
-type pluginsStore struct {
- sync.RWMutex
- // plugin name -> Plugin in the order in which they got added
- store map[string][]*Plugin
-}
-
-// draPlugins map keeps track of all registered DRA plugins on the node
-// and their corresponding sockets.
-var draPlugins = &pluginsStore{}
-
-// get lets you retrieve a DRA Plugin by name.
-// This method is protected by a mutex.
-func (s *pluginsStore) get(pluginName string) *Plugin {
- s.RLock()
- defer s.RUnlock()
-
- instances := s.store[pluginName]
- if len(instances) == 0 {
- return nil
- }
- // Heuristic: pick the most recent one. It's most likely
- // the newest, except when kubelet got restarted and registered
- // all running plugins in random order.
- return instances[len(instances)-1]
-}
-
-// add lets you save a DRA Plugin to the list under its name.
-// This method is protected by a mutex.
-func (s *pluginsStore) add(p *Plugin) error {
- s.Lock()
- defer s.Unlock()
-
- if s.store == nil {
- s.store = make(map[string][]*Plugin)
- }
- for _, oldP := range s.store[p.name] {
- if oldP.endpoint == p.endpoint {
- // One plugin instance cannot hijack the endpoint of another instance.
- return fmt.Errorf("endpoint %s already registered for plugin %s", p.endpoint, p.name)
- }
- }
- s.store[p.name] = append(s.store[p.name], p)
- return nil
-}
-
-// remove lets you remove one endpoint for a DRA Plugin.
-// This method is protected by a mutex. It returns the
-// plugin if found and true if that was the last instance.
-func (s *pluginsStore) remove(pluginName, endpoint string) (*Plugin, bool) {
- s.Lock()
- defer s.Unlock()
-
- instances := s.store[pluginName]
- i := slices.IndexFunc(instances, func(p *Plugin) bool { return p.endpoint == endpoint })
- if i == -1 {
- return nil, false
- }
- p := instances[i]
- last := len(instances) == 1
- if last {
- delete(s.store, pluginName)
- } else {
- s.store[pluginName] = slices.Delete(instances, i, i+1)
- }
-
- if p.cancel != nil {
- p.cancel(errors.New("plugin got removed"))
- }
- return p, last
-}
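As an editorial aside, the store semantics deleted above can be summarized with a small self-contained sketch: several instances of one plugin may coexist (for example during a DaemonSet rolling update), lookups return the most recently added instance, and re-registering an endpoint that is already present is rejected. All names here are illustrative.

package main

import "fmt"

type instance struct{ name, endpoint string }

type store struct{ byName map[string][]instance }

// add rejects duplicate endpoints for the same plugin name,
// mirroring the "cannot hijack the endpoint" rule above.
func (s *store) add(p instance) error {
	if s.byName == nil {
		s.byName = map[string][]instance{}
	}
	for _, old := range s.byName[p.name] {
		if old.endpoint == p.endpoint {
			return fmt.Errorf("endpoint %s already registered for plugin %s", p.endpoint, p.name)
		}
	}
	s.byName[p.name] = append(s.byName[p.name], p)
	return nil
}

// get returns the last-registered instance, the same heuristic as above.
func (s *store) get(name string) *instance {
	ins := s.byName[name]
	if len(ins) == 0 {
		return nil
	}
	return &ins[len(ins)-1]
}

func main() {
	var s store
	_ = s.add(instance{"gpu.example.com", "/sock-old"})
	_ = s.add(instance{"gpu.example.com", "/sock-new"})
	fmt.Println(s.get("gpu.example.com").endpoint)               // /sock-new
	fmt.Println(s.add(instance{"gpu.example.com", "/sock-new"})) // duplicate endpoint error
}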
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/registration.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/registration.go
deleted file mode 100644
index 0f410c4290b..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/registration.go
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package plugin
-
-import (
- "context"
- "errors"
- "fmt"
- "slices"
- "sync"
- "time"
-
- v1 "k8s.io/api/core/v1"
- resourceapi "k8s.io/api/resource/v1beta1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/kubernetes"
- "k8s.io/klog/v2"
- drapbv1alpha4 "k8s.io/kubelet/pkg/apis/dra/v1alpha4"
- drapbv1beta1 "k8s.io/kubelet/pkg/apis/dra/v1beta1"
- "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-)
-
-// defaultClientCallTimeout is the default amount of time that a DRA driver has
-// to respond to any of the gRPC calls. kubelet uses this value by passing nil
-// to RegisterPlugin. Some tests use a different, usually shorter timeout to
-// speed up testing.
-//
-// This is half of the kubelet retry period (according to
-// https://github.com/kubernetes/kubernetes/commit/0449cef8fd5217d394c5cd331d852bd50983e6b3).
-const defaultClientCallTimeout = 45 * time.Second
-
-// RegistrationHandler is the handler which is fed to the pluginwatcher API.
-type RegistrationHandler struct {
- // backgroundCtx is used for all future activities of the handler.
- // This is necessary because it implements APIs which don't
- // provide a context.
- backgroundCtx context.Context
- cancel func(err error)
- kubeClient kubernetes.Interface
- getNode func() (*v1.Node, error)
- wipingDelay time.Duration
-
- wg sync.WaitGroup
- mutex sync.Mutex
-
- // pendingWipes maps a plugin name to a cancel function for
- // wiping of that plugin's ResourceSlices. Entries get added
- // in DeRegisterPlugin and checked in RegisterPlugin. If
- // wiping is pending during RegisterPlugin, it gets canceled.
- //
- // Must use pointers to functions because the entries have to
- // be comparable.
- pendingWipes map[string]*context.CancelCauseFunc
-}
-
-var _ cache.PluginHandler = &RegistrationHandler{}
-
-// NewRegistrationHandler returns a new registration handler.
-//
-// Must only be called once per process because it manages global state.
-// If a kubeClient is provided, then it synchronizes ResourceSlices
-// with the resource information provided by plugins.
-func NewRegistrationHandler(kubeClient kubernetes.Interface, getNode func() (*v1.Node, error), wipingDelay time.Duration) *RegistrationHandler {
- // The context and thus logger should come from the caller.
- return newRegistrationHandler(context.TODO(), kubeClient, getNode, wipingDelay)
-}
-
-func newRegistrationHandler(ctx context.Context, kubeClient kubernetes.Interface, getNode func() (*v1.Node, error), wipingDelay time.Duration) *RegistrationHandler {
- ctx, cancel := context.WithCancelCause(ctx)
- handler := &RegistrationHandler{
- backgroundCtx: klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "DRA registration handler")),
- cancel: cancel,
- kubeClient: kubeClient,
- getNode: getNode,
- wipingDelay: wipingDelay,
- pendingWipes: make(map[string]*context.CancelCauseFunc),
- }
-
- // When kubelet starts up, no DRA driver has registered yet. None of
- // the drivers are usable until they come back, which might not happen
- // at all. Therefore it is better to not advertise any local resources
- // because pods could get stuck on the node waiting for the driver
- // to start up.
- //
- // This has to run in the background.
- handler.wg.Add(1)
- go func() {
- defer handler.wg.Done()
-
- logger := klog.LoggerWithName(klog.FromContext(handler.backgroundCtx), "startup")
- ctx := klog.NewContext(handler.backgroundCtx, logger)
- handler.wipeResourceSlices(ctx, 0 /* no delay */, "" /* all drivers */)
- }()
-
- return handler
-}
-
-// Stop cancels any remaining background activities and blocks until all goroutines have stopped.
-func (h *RegistrationHandler) Stop() {
- h.cancel(errors.New("Stop was called"))
- h.wg.Wait()
-}
-
-// wipeResourceSlices deletes ResourceSlices of the node, optionally just for a specific driver.
-// Wiping will delay for a while and can be canceled by canceling the context.
-func (h *RegistrationHandler) wipeResourceSlices(ctx context.Context, delay time.Duration, driver string) {
- if h.kubeClient == nil {
- return
- }
- logger := klog.FromContext(ctx)
-
- if delay != 0 {
- // Before we start deleting, give the driver time to bounce back.
- // Perhaps it got removed as part of a DaemonSet update and the
- // replacement pod is about to start.
- logger.V(4).Info("Starting to wait before wiping ResourceSlices", "delay", delay)
- select {
- case <-ctx.Done():
- logger.V(4).Info("Aborting wiping of ResourceSlices", "reason", context.Cause(ctx))
- case <-time.After(delay):
- logger.V(4).Info("Starting to wipe ResourceSlices after waiting", "delay", delay)
- }
- }
-
- backoff := wait.Backoff{
- Duration: time.Second,
- Factor: 2,
- Jitter: 0.2,
- Cap: 5 * time.Minute,
- Steps: 100,
- }
-
- // Error logging is done inside the loop. Context cancellation doesn't get logged.
- _ = wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
- node, err := h.getNode()
- if apierrors.IsNotFound(err) {
- return false, nil
- }
- if err != nil {
- logger.Error(err, "Unexpected error checking for node")
- return false, nil
- }
- fieldSelector := fields.Set{resourceapi.ResourceSliceSelectorNodeName: node.Name}
- if driver != "" {
- fieldSelector[resourceapi.ResourceSliceSelectorDriver] = driver
- }
-
- err = h.kubeClient.ResourceV1beta1().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: fieldSelector.String()})
- switch {
- case err == nil:
- logger.V(3).Info("Deleted ResourceSlices", "fieldSelector", fieldSelector)
- return true, nil
- case apierrors.IsUnauthorized(err):
- // This can happen while kubelet is still figuring out
- // its credentials.
- logger.V(5).Info("Deleting ResourceSlice failed, retrying", "fieldSelector", fieldSelector, "err", err)
- return false, nil
- default:
- // Log and retry for other errors.
- logger.V(3).Info("Deleting ResourceSlice failed, retrying", "fieldSelector", fieldSelector, "err", err)
- return false, nil
- }
- })
-}
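As an editorial aside, the retry cadence of the wait.Backoff used above works out to roughly 1s, 2s, 4s, ... with up to 20% added jitter, capped at 5 minutes, for at most 100 steps. A minimal sketch that prints the schedule, assuming the k8s.io/apimachinery wait package:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: time.Second,
		Factor:   2,
		Jitter:   0.2,
		Cap:      5 * time.Minute,
		Steps:    100,
	}
	// Step() returns the next delay and advances the backoff state.
	for i := 0; i < 12; i++ {
		fmt.Printf("attempt %2d: wait %v\n", i+1, backoff.Step())
	}
}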
- logger.V(4).Info("Starting to wait before wiping ResourceSlices", "delay", delay) - select { - case <-ctx.Done(): - logger.V(4).Info("Aborting wiping of ResourceSlices", "reason", context.Cause(ctx)) - case <-time.After(delay): - logger.V(4).Info("Starting to wipe ResourceSlices after waiting", "delay", delay) - } - } - - backoff := wait.Backoff{ - Duration: time.Second, - Factor: 2, - Jitter: 0.2, - Cap: 5 * time.Minute, - Steps: 100, - } - - // Error logging is done inside the loop. Context cancellation doesn't get logged. - _ = wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) { - node, err := h.getNode() - if apierrors.IsNotFound(err) { - return false, nil - } - if err != nil { - logger.Error(err, "Unexpected error checking for node") - return false, nil - } - fieldSelector := fields.Set{resourceapi.ResourceSliceSelectorNodeName: node.Name} - if driver != "" { - fieldSelector[resourceapi.ResourceSliceSelectorDriver] = driver - } - - err = h.kubeClient.ResourceV1beta1().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: fieldSelector.String()}) - switch { - case err == nil: - logger.V(3).Info("Deleted ResourceSlices", "fieldSelector", fieldSelector) - return true, nil - case apierrors.IsUnauthorized(err): - // This can happen while kubelet is still figuring out - // its credentials. - logger.V(5).Info("Deleting ResourceSlice failed, retrying", "fieldSelector", fieldSelector, "err", err) - return false, nil - default: - // Log and retry for other errors. - logger.V(3).Info("Deleting ResourceSlice failed, retrying", "fieldSelector", fieldSelector, "err", err) - return false, nil - } - }) -} - -// RegisterPlugin is called when a plugin can be registered. -// -// DRA uses the version array in the registration API to enumerate all gRPC -// services that the plugin provides, using the "." format (e.g. "v1beta1.DRAPlugin"). This allows kubelet to determine -// in advance which version to use resp. which optional services the plugin -// supports. -func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, supportedServices []string, pluginClientTimeout *time.Duration) error { - // Prepare a context with its own logger for the plugin. - // - // The lifecycle of the plugin's background activities is tied to our - // root context, so canceling that will also cancel the plugin. - // - // The logger injects the plugin name as additional value - // into all log output related to the plugin. - ctx := h.backgroundCtx - logger := klog.FromContext(ctx) - logger = klog.LoggerWithValues(logger, "pluginName", pluginName, "endpoint", endpoint) - ctx = klog.NewContext(ctx, logger) - - logger.V(3).Info("Register new DRA plugin") - - chosenService, err := h.validateSupportedServices(pluginName, supportedServices) - if err != nil { - return fmt.Errorf("version check of plugin %s failed: %w", pluginName, err) - } - - var timeout time.Duration - if pluginClientTimeout == nil { - timeout = defaultClientCallTimeout - } else { - timeout = *pluginClientTimeout - } - - ctx, cancel := context.WithCancelCause(ctx) - - pluginInstance := &Plugin{ - name: pluginName, - backgroundCtx: ctx, - cancel: cancel, - conn: nil, - endpoint: endpoint, - chosenService: chosenService, - clientCallTimeout: timeout, - } - - // Storing endpoint of newly registered DRA Plugin into the map, where plugin name will be the key - // all other DRA components will be able to get the actual socket of DRA plugins by its name. 
-// DeRegisterPlugin is called when a plugin has removed its socket,
-// signaling it is no longer available.
-func (h *RegistrationHandler) DeRegisterPlugin(pluginName, endpoint string) {
- if p, last := draPlugins.remove(pluginName, endpoint); p != nil {
- // This logger includes endpoint and pluginName.
- logger := klog.FromContext(p.backgroundCtx)
- logger.V(3).Info("Deregister DRA plugin", "lastInstance", last)
- if !last {
- return
- }
-
- // Prepare for canceling the background wiping. This needs to run
- // in the context of the registration handler; the context of the
- // plugin itself is already canceled.
- logger = klog.FromContext(h.backgroundCtx)
- logger = klog.LoggerWithName(logger, "driver-cleanup")
- logger = klog.LoggerWithValues(logger, "pluginName", pluginName)
- ctx, cancel := context.WithCancelCause(h.backgroundCtx)
- ctx = klog.NewContext(ctx, logger)
-
- // Clean up the ResourceSlices for the deleted Plugin since it
- // may have died without doing so itself and might never come
- // back.
- //
- // May get canceled if the plugin comes back quickly enough
- // (see RegisterPlugin).
- h.mutex.Lock()
- defer h.mutex.Unlock()
- if cancel := h.pendingWipes[pluginName]; cancel != nil {
- (*cancel)(errors.New("plugin deregistered a second time"))
- }
- h.pendingWipes[pluginName] = &cancel
-
- h.wg.Add(1)
- go func() {
- defer h.wg.Done()
- defer func() {
- h.mutex.Lock()
- defer h.mutex.Unlock()
-
- // Cancel our own context, but remove it from the map only if it
- // is the current entry. Perhaps it already got replaced.
- cancel(errors.New("wiping done"))
- if h.pendingWipes[pluginName] == &cancel {
- delete(h.pendingWipes, pluginName)
- }
- }()
- h.wipeResourceSlices(ctx, h.wipingDelay, pluginName)
- }()
- return
- }
-
- logger := klog.FromContext(h.backgroundCtx)
- logger.V(3).Info("Deregister DRA plugin not necessary, was already removed")
-}
-
-// ValidatePlugin is called by kubelet's plugin watcher upon detection
-// of a new registration socket opened by a DRA plugin.
-func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, supportedServices []string) error { - _, err := h.validateSupportedServices(pluginName, supportedServices) - if err != nil { - return fmt.Errorf("invalid versions of plugin %s: %w", pluginName, err) - } - - return err -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/checkpoint.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/checkpoint.go deleted file mode 100644 index bfbae956bbc..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/checkpoint.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "encoding/json" - "hash/crc32" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" -) - -const ( - CheckpointAPIGroup = "checkpoint.dra.kubelet.k8s.io" - CheckpointKind = "DRACheckpoint" - CheckpointAPIVersion = CheckpointAPIGroup + "/v1" -) - -// Checkpoint represents a structure to store DRA checkpoint data -type Checkpoint struct { - // Data is a JSON serialized checkpoint data - Data string - // Checksum is a checksum of Data - Checksum uint32 -} - -type CheckpointData struct { - metav1.TypeMeta - ClaimInfoStateList ClaimInfoStateList -} - -// NewCheckpoint creates a new checkpoint from a list of claim info states -func NewCheckpoint(data ClaimInfoStateList) (*Checkpoint, error) { - cpData := &CheckpointData{ - TypeMeta: metav1.TypeMeta{ - Kind: CheckpointKind, - APIVersion: CheckpointAPIVersion, - }, - ClaimInfoStateList: data, - } - - cpDataBytes, err := json.Marshal(cpData) - if err != nil { - return nil, err - } - - cp := &Checkpoint{ - Data: string(cpDataBytes), - Checksum: crc32.ChecksumIEEE(cpDataBytes), - } - - return cp, nil -} - -// MarshalCheckpoint marshals checkpoint to JSON -func (cp *Checkpoint) MarshalCheckpoint() ([]byte, error) { - return json.Marshal(cp) -} - -// UnmarshalCheckpoint unmarshals checkpoint from JSON -// and verifies its data checksum -func (cp *Checkpoint) UnmarshalCheckpoint(blob []byte) error { - if err := json.Unmarshal(blob, cp); err != nil { - return err - } - - // verify checksum - if err := cp.VerifyChecksum(); err != nil { - return err - } - - return nil -} - -// VerifyChecksum verifies that current checksum -// of checkpointed Data is valid -func (cp *Checkpoint) VerifyChecksum() error { - expectedCS := crc32.ChecksumIEEE([]byte(cp.Data)) - if expectedCS != cp.Checksum { - return &errors.CorruptCheckpointError{ActualCS: uint64(cp.Checksum), ExpectedCS: uint64(expectedCS)} - } - return nil -} - -// GetClaimInfoStateList returns list of claim info states from checkpoint -func (cp *Checkpoint) GetClaimInfoStateList() (ClaimInfoStateList, error) { - var data CheckpointData - if err := json.Unmarshal([]byte(cp.Data), &data); err != nil { - return nil, err - } - - return data.ClaimInfoStateList, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/checkpointer.go 
b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/checkpointer.go deleted file mode 100644 index 62f98029f85..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/checkpointer.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "errors" - "fmt" - "sync" - - "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" - checkpointerrors "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" -) - -type Checkpointer interface { - GetOrCreate() (*Checkpoint, error) - Store(*Checkpoint) error -} - -type checkpointer struct { - sync.RWMutex - checkpointManager checkpointmanager.CheckpointManager - checkpointName string -} - -// NewCheckpointer creates new checkpointer for keeping track of claim info with checkpoint backend -func NewCheckpointer(stateDir, checkpointName string) (Checkpointer, error) { - if len(checkpointName) == 0 { - return nil, fmt.Errorf("received empty string instead of checkpointName") - } - - checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir) - if err != nil { - return nil, fmt.Errorf("failed to initialize checkpoint manager: %w", err) - } - - checkpointer := &checkpointer{ - checkpointManager: checkpointManager, - checkpointName: checkpointName, - } - - return checkpointer, nil -} - -// GetOrCreate gets list of claim info states from a checkpoint -// or creates empty list if checkpoint doesn't exist -func (sc *checkpointer) GetOrCreate() (*Checkpoint, error) { - sc.Lock() - defer sc.Unlock() - - checkpoint, err := NewCheckpoint(nil) - if err != nil { - return nil, fmt.Errorf("failed to create new checkpoint: %w", err) - } - - err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint) - if errors.Is(err, checkpointerrors.ErrCheckpointNotFound) { - err = sc.store(checkpoint) - if err != nil { - return nil, fmt.Errorf("failed to store checkpoint %v: %w", sc.checkpointName, err) - } - return checkpoint, nil - } - if err != nil { - return nil, fmt.Errorf("failed to get checkpoint %v: %w", sc.checkpointName, err) - } - - return checkpoint, nil -} - -// Store stores checkpoint to the file -func (sc *checkpointer) Store(checkpoint *Checkpoint) error { - sc.Lock() - defer sc.Unlock() - - return sc.store(checkpoint) -} - -// store saves state to a checkpoint, caller is responsible for locking -func (sc *checkpointer) store(checkpoint *Checkpoint) error { - if err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint); err != nil { - return fmt.Errorf("could not save checkpoint %s: %w", sc.checkpointName, err) - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/state.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/state.go deleted file mode 100644 index 045a9b6b2f4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/state.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package state - -import ( - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" -) - -type ClaimInfoStateList []ClaimInfoState - -// +k8s:deepcopy-gen=true -type ClaimInfoState struct { - // ClaimUID is the UID of a resource claim - ClaimUID types.UID - - // ClaimName is the name of a resource claim - ClaimName string - - // Namespace is a claim namespace - Namespace string - - // PodUIDs is a set of pod UIDs that reference a resource - PodUIDs sets.Set[string] - - // DriverState contains information about all drivers which have allocation - // results in the claim, even if they don't provide devices for their results. - DriverState map[string]DriverState -} - -// DriverState is used to store per-device claim info state in a checkpoint -// +k8s:deepcopy-gen=true -type DriverState struct { - Devices []Device -} - -// Device is how a DRA driver described an allocated device in a claim -// to kubelet. RequestName and CDI device IDs are optional. -// +k8s:deepcopy-gen=true -type Device struct { - PoolName string - DeviceName string - RequestNames []string - CDIDeviceIDs []string -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/zz_generated.deepcopy.go deleted file mode 100644 index 6489dab7a51..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/state/zz_generated.deepcopy.go +++ /dev/null @@ -1,105 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package state - -import ( - sets "k8s.io/apimachinery/pkg/util/sets" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimInfoState) DeepCopyInto(out *ClaimInfoState) { - *out = *in - if in.PodUIDs != nil { - in, out := &in.PodUIDs, &out.PodUIDs - *out = make(sets.Set[string], len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.DriverState != nil { - in, out := &in.DriverState, &out.DriverState - *out = make(map[string]DriverState, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimInfoState. 
-func (in *ClaimInfoState) DeepCopy() *ClaimInfoState { - if in == nil { - return nil - } - out := new(ClaimInfoState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Device) DeepCopyInto(out *Device) { - *out = *in - if in.RequestNames != nil { - in, out := &in.RequestNames, &out.RequestNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.CDIDeviceIDs != nil { - in, out := &in.CDIDeviceIDs, &out.CDIDeviceIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. -func (in *Device) DeepCopy() *Device { - if in == nil { - return nil - } - out := new(Device) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverState) DeepCopyInto(out *DriverState) { - *out = *in - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]Device, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverState. -func (in *DriverState) DeepCopy() *DriverState { - if in == nil { - return nil - } - out := new(DriverState) - in.DeepCopyInto(out) - return out -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/types.go deleted file mode 100644 index eafd4914e16..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/types.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dra - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/kubelet/config" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" -) - -// Manager manages all the DRA resource plugins running on a node. -type Manager interface { - // GetWatcherHandler returns the plugin handler for the DRA. - GetWatcherHandler() cache.PluginHandler - - // Start starts the reconcile loop of the manager. - // This will ensure that all claims are unprepared even if pods get deleted unexpectedly. - Start(ctx context.Context, activePods ActivePodsFunc, getNode GetNodeFunc, sourcesReady config.SourcesReady) error - - // PrepareResources prepares resources for a pod. - // It communicates with the DRA resource plugin to prepare resources. - PrepareResources(ctx context.Context, pod *v1.Pod) error - - // UnprepareResources calls NodeUnprepareResource GRPC from DRA plugin to unprepare pod resources - UnprepareResources(ctx context.Context, pod *v1.Pod) error - - // GetResources gets a ContainerInfo object from the claimInfo cache. - // This information is used by the caller to update a container config. 
- GetResources(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error) - - // PodMightNeedToUnprepareResources returns true if the pod with the given UID - // might need to unprepare resources. - PodMightNeedToUnprepareResources(UID types.UID) bool - - // GetContainerClaimInfos gets Container ClaimInfo objects - GetContainerClaimInfos(pod *v1.Pod, container *v1.Container) ([]*ClaimInfo, error) -} - -// ContainerInfo contains information required by the runtime to consume prepared resources. -type ContainerInfo struct { - // CDI Devices for the container - CDIDevices []kubecontainer.CDIDevice -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/zz_generated.deepcopy.go deleted file mode 100644 index 577fd431881..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package dra - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimInfo) DeepCopyInto(out *ClaimInfo) { - *out = *in - in.ClaimInfoState.DeepCopyInto(&out.ClaimInfoState) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimInfo. -func (in *ClaimInfo) DeepCopy() *ClaimInfo { - if in == nil { - return nil - } - out := new(ClaimInfo) - in.DeepCopyInto(out) - return out -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_container_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_container_manager.go deleted file mode 100644 index dee9ccc22fe..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_container_manager.go +++ /dev/null @@ -1,278 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cm - -import ( - "context" - "sync" - - v1 "k8s.io/api/core/v1" - - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apiserver/pkg/server/healthz" - internalapi "k8s.io/cri-api/pkg/apis" - podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/config" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" - "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" - "k8s.io/kubernetes/pkg/kubelet/status" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" -) - -type FakeContainerManager struct { - sync.Mutex - CalledFunctions []string - PodContainerManager *FakePodContainerManager - shouldResetExtendedResourceCapacity bool -} - -var _ ContainerManager = &FakeContainerManager{} - -func NewFakeContainerManager() *FakeContainerManager { - return &FakeContainerManager{ - PodContainerManager: NewFakePodContainerManager(), - } -} - -func (cm *FakeContainerManager) Start(_ context.Context, _ *v1.Node, _ ActivePodsFunc, _ GetNodeFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService, _ bool) error { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "Start") - return nil -} - -func (cm *FakeContainerManager) SystemCgroupsLimit() v1.ResourceList { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "SystemCgroupsLimit") - return v1.ResourceList{} -} - -func (cm *FakeContainerManager) GetNodeConfig() NodeConfig { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetNodeConfig") - return NodeConfig{} -} - -func (cm *FakeContainerManager) GetMountedSubsystems() *CgroupSubsystems { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetMountedSubsystems") - return &CgroupSubsystems{} -} - -func (cm *FakeContainerManager) GetQOSContainersInfo() QOSContainersInfo { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "QOSContainersInfo") - return QOSContainersInfo{} -} - -func (cm *FakeContainerManager) UpdateQOSCgroups() error { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "UpdateQOSCgroups") - return nil -} - -func (cm *FakeContainerManager) Status() Status { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "Status") - return Status{} -} - -func (cm *FakeContainerManager) GetNodeAllocatableReservation() v1.ResourceList { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetNodeAllocatableReservation") - return nil -} - -func (cm *FakeContainerManager) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetCapacity") - if !localStorageCapacityIsolation { - return v1.ResourceList{} - } - c := v1.ResourceList{ - v1.ResourceEphemeralStorage: *resource.NewQuantity( - int64(0), - resource.BinarySI), - } - return c -} - -func (cm *FakeContainerManager) GetPluginRegistrationHandlers() map[string]cache.PluginHandler { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetPluginRegistrationHandlers") - return nil -} - -func (cm *FakeContainerManager) 
GetHealthCheckers() []healthz.HealthChecker { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetPluginRegistrationServerChecker") - return []healthz.HealthChecker{} -} - -func (cm *FakeContainerManager) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetDevicePluginResourceCapacity") - return nil, nil, []string{} -} - -func (cm *FakeContainerManager) NewPodContainerManager() PodContainerManager { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "PodContainerManager") - return cm.PodContainerManager -} - -func (cm *FakeContainerManager) GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetResources") - return &kubecontainer.RunContainerOptions{}, nil -} - -func (cm *FakeContainerManager) UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "UpdatePluginResources") - return nil -} - -func (cm *FakeContainerManager) InternalContainerLifecycle() InternalContainerLifecycle { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "InternalContainerLifecycle") - return &internalContainerLifecycleImpl{cpumanager.NewFakeManager(), memorymanager.NewFakeManager(), topologymanager.NewFakeManager()} -} - -func (cm *FakeContainerManager) GetPodCgroupRoot() string { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetPodCgroupRoot") - return "" -} - -func (cm *FakeContainerManager) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetDevices") - return nil -} - -func (cm *FakeContainerManager) GetAllocatableDevices() []*podresourcesapi.ContainerDevices { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableDevices") - return nil -} - -func (cm *FakeContainerManager) ShouldResetExtendedResourceCapacity() bool { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "ShouldResetExtendedResourceCapacity") - return cm.shouldResetExtendedResourceCapacity -} - -func (cm *FakeContainerManager) GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocateResourcesPodAdmitHandler") - return topologymanager.NewFakeManager() -} - -func (cm *FakeContainerManager) UpdateAllocatedDevices() { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "UpdateAllocatedDevices") - return -} - -func (cm *FakeContainerManager) GetCPUs(_, _ string) []int64 { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetCPUs") - return nil -} - -func (cm *FakeContainerManager) GetAllocatableCPUs() []int64 { - cm.Lock() - defer cm.Unlock() - return nil -} - -func (cm *FakeContainerManager) GetMemory(_, _ string) []*podresourcesapi.ContainerMemory { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetMemory") - return nil -} - -func (cm *FakeContainerManager) GetAllocatableMemory() []*podresourcesapi.ContainerMemory { - cm.Lock() - defer cm.Unlock() - return nil -} - -func (cm 
*FakeContainerManager) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource { - return nil -} - -func (cm *FakeContainerManager) GetNodeAllocatableAbsolute() v1.ResourceList { - cm.Lock() - defer cm.Unlock() - return nil -} - -func (cm *FakeContainerManager) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error { - return nil -} - -func (cm *FakeContainerManager) UnprepareDynamicResources(context.Context, *v1.Pod) error { - return nil -} - -func (cm *FakeContainerManager) PodMightNeedToUnprepareResources(UID types.UID) bool { - return false -} -func (cm *FakeContainerManager) UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) { -} -func (cm *FakeContainerManager) Updates() <-chan resourceupdates.Update { - return nil -} - -func (cm *FakeContainerManager) PodHasExclusiveCPUs(pod *v1.Pod) bool { - return false -} - -func (cm *FakeContainerManager) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool { - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_internal_container_lifecycle.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_internal_container_lifecycle.go deleted file mode 100644 index d133f28f17c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_internal_container_lifecycle.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" -) - -func NewFakeInternalContainerLifecycle() *fakeInternalContainerLifecycle { - return &fakeInternalContainerLifecycle{} -} - -type fakeInternalContainerLifecycle struct{} - -func (f *fakeInternalContainerLifecycle) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { - return nil -} - -func (f *fakeInternalContainerLifecycle) PreStartContainer(pod *v1.Pod, container *v1.Container, containerID string) error { - return nil -} - -func (f *fakeInternalContainerLifecycle) PostStopContainer(containerID string) error { - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_pod_container_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_pod_container_manager.go deleted file mode 100644 index c3ffae15078..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_pod_container_manager.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cm - -import ( - "reflect" - "sync" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" -) - -type FakePodContainerManager struct { - sync.Mutex - CalledFunctions []string - Cgroups map[types.UID]CgroupName -} - -var _ PodContainerManager = &FakePodContainerManager{} - -func NewFakePodContainerManager() *FakePodContainerManager { - return &FakePodContainerManager{ - Cgroups: make(map[types.UID]CgroupName), - } -} - -func (m *FakePodContainerManager) AddPodFromCgroups(pod *kubecontainer.Pod) { - m.Lock() - defer m.Unlock() - m.Cgroups[pod.ID] = []string{pod.Name} -} - -func (m *FakePodContainerManager) Exists(_ *v1.Pod) bool { - m.Lock() - defer m.Unlock() - m.CalledFunctions = append(m.CalledFunctions, "Exists") - return true -} - -func (m *FakePodContainerManager) EnsureExists(_ *v1.Pod) error { - m.Lock() - defer m.Unlock() - m.CalledFunctions = append(m.CalledFunctions, "EnsureExists") - return nil -} - -func (m *FakePodContainerManager) GetPodContainerName(_ *v1.Pod) (CgroupName, string) { - m.Lock() - defer m.Unlock() - m.CalledFunctions = append(m.CalledFunctions, "GetPodContainerName") - return nil, "" -} - -func (m *FakePodContainerManager) Destroy(name CgroupName) error { - m.Lock() - defer m.Unlock() - m.CalledFunctions = append(m.CalledFunctions, "Destroy") - for key, cgname := range m.Cgroups { - if reflect.DeepEqual(cgname, name) { - delete(m.Cgroups, key) - return nil - } - } - return nil -} - -func (m *FakePodContainerManager) ReduceCPULimits(_ CgroupName) error { - m.Lock() - defer m.Unlock() - m.CalledFunctions = append(m.CalledFunctions, "ReduceCPULimits") - return nil -} - -func (m *FakePodContainerManager) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) { - m.Lock() - defer m.Unlock() - m.CalledFunctions = append(m.CalledFunctions, "GetAllPodsFromCgroups") - // return a copy for the race detector - grp := make(map[types.UID]CgroupName) - for key, value := range m.Cgroups { - grp[key] = value - } - return grp, nil -} - -func (m *FakePodContainerManager) IsPodCgroup(cgroupfs string) (bool, types.UID) { - m.Lock() - defer m.Unlock() - m.CalledFunctions = append(m.CalledFunctions, "IsPodCgroup") - return false, types.UID("") -} - -func (cm *FakePodContainerManager) GetPodCgroupMemoryUsage(_ *v1.Pod) (uint64, error) { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetPodCgroupMemoryUsage") - return 0, nil -} - -func (cm *FakePodContainerManager) GetPodCgroupConfig(_ *v1.Pod, _ v1.ResourceName) (*ResourceConfig, error) { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "GetPodCgroupConfig") - return nil, nil -} - -func (cm *FakePodContainerManager) SetPodCgroupConfig(pod *v1.Pod, resourceConfig *ResourceConfig) error { - cm.Lock() - defer cm.Unlock() - cm.CalledFunctions = append(cm.CalledFunctions, "SetPodCgroupConfig") - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers.go deleted file mode 100644 index 1de4f7d5284..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "context" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" -) - -// for typecheck across platforms -var _ func(int64, int64) int64 = MilliCPUToQuota -var _ func(int64) uint64 = MilliCPUToShares -var _ func(*v1.Pod, bool, uint64, bool) *ResourceConfig = ResourceConfigForPod -var _ func() (*CgroupSubsystems, error) = GetCgroupSubsystems -var _ func(string) ([]int, error) = getCgroupProcs -var _ func(types.UID) string = GetPodCgroupNameSuffix -var _ func(string, bool, string) string = NodeAllocatableRoot -var _ func(string) (string, error) = GetKubeletContainer - -// hardEvictionReservation returns a resourcelist that includes reservation of resources based on hard eviction thresholds. -func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.ResourceList) v1.ResourceList { - if len(thresholds) == 0 { - return nil - } - ret := v1.ResourceList{} - for _, threshold := range thresholds { - if threshold.Operator != evictionapi.OpLessThan { - continue - } - switch threshold.Signal { - case evictionapi.SignalMemoryAvailable: - memoryCapacity := capacity[v1.ResourceMemory] - value := evictionapi.GetThresholdQuantity(threshold.Value, &memoryCapacity) - ret[v1.ResourceMemory] = *value - case evictionapi.SignalNodeFsAvailable: - storageCapacity := capacity[v1.ResourceEphemeralStorage] - value := evictionapi.GetThresholdQuantity(threshold.Value, &storageCapacity) - ret[v1.ResourceEphemeralStorage] = *value - } - } - return ret -} - -func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.Set[string]) { - podSandboxMap := make(map[string]string) - podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil) - for _, p := range podSandboxList { - podSandboxMap[p.Id] = p.Metadata.Uid - } - - runningSet := sets.New[string]() - containerMap := containermap.NewContainerMap() - containerList, _ := runtimeService.ListContainers(ctx, nil) - for _, c := range containerList { - if _, exists := podSandboxMap[c.PodSandboxId]; !exists { - klog.InfoS("No PodSandBox found for the container", "podSandboxId", c.PodSandboxId, "containerName", c.Metadata.Name, "containerId", c.Id) - continue - } - podUID := podSandboxMap[c.PodSandboxId] - containerMap.Add(podUID, c.Metadata.Name, c.Id) - if c.State == runtimeapi.ContainerState_CONTAINER_RUNNING { - klog.V(4).InfoS("Container reported running", "podSandboxId", c.PodSandboxId, "podUID", podUID, "containerName", c.Metadata.Name, "containerId", c.Id) - runningSet.Insert(c.Id) - } - } - return containerMap, runningSet -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go deleted file mode 100644 index 8e2ffd7d2bb..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go +++ 
/dev/null
@@ -1,343 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cm
-
-import (
- "bufio"
- "fmt"
- "os"
- "path/filepath"
- "strconv"
-
- libcontainercgroups "github.com/opencontainers/cgroups"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/types"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
-
- "k8s.io/component-helpers/resource"
- v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
- v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
- kubefeatures "k8s.io/kubernetes/pkg/features"
- "k8s.io/kubernetes/pkg/kubelet/cm/util"
-)
-
-const (
- // These limits are defined in the kernel:
- // https://github.com/torvalds/linux/blob/0bddd227f3dc55975e2b8dfa7fc6f959b062a2c7/kernel/sched/sched.h#L427-L428
- MinShares = 2
- MaxShares = 262144
-
- SharesPerCPU = 1024
- MilliCPUToCPU = 1000
-
- // 100000 microseconds is equivalent to 100ms
- QuotaPeriod = 100000
- // 1000 microseconds is equivalent to 1ms
- // defined here:
- // https://github.com/torvalds/linux/blob/cac03ac368fabff0122853de2422d4e17a32de08/kernel/sched/core.c#L10546
- MinQuotaPeriod = 1000
-
- // From the inverse of the conversion in MilliCPUToQuota:
- // MinQuotaPeriod * MilliCPUToCPU / QuotaPeriod
- MinMilliCPULimit = 10
-)
-
-// MilliCPUToQuota converts milliCPU to CFS quota and period values.
-// The period parameter and the resulting quota are expressed in microseconds.
-func MilliCPUToQuota(milliCPU int64, period int64) (quota int64) {
- // CFS quota is measured in two values:
- // - cfs_period_us=100ms (the amount of time to measure usage across given by period)
- // - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
- // so in the above example, you are limited to 20% of a single CPU
- // for multi-cpu environments, you just scale equivalent amounts
- // see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details
-
- if milliCPU == 0 {
- return
- }
-
- if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
- period = QuotaPeriod
- }
-
- // we then convert your milliCPU to a value normalized over a period
- quota = (milliCPU * period) / MilliCPUToCPU
-
- // quota needs to be a minimum of 1ms.
- if quota < MinQuotaPeriod {
- quota = MinQuotaPeriod
- }
- return
-}
-
-// MilliCPUToShares converts the milliCPU to CFS shares.
-func MilliCPUToShares(milliCPU int64) uint64 {
- if milliCPU == 0 {
- // Docker converts zero milliCPU to unset, which maps to kernel default
- // for unset: 1024. Return 2 here to really match kernel default for
- // zero milliCPU.
- return MinShares
- }
- // Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
- shares := (milliCPU * SharesPerCPU) / MilliCPUToCPU
- if shares < MinShares {
- return MinShares
- }
- if shares > MaxShares {
- return MaxShares
- }
- return uint64(shares)
-}
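As an editorial aside, a worked example of the two conversions above, for a container with a 250m CPU request and a 500m CPU limit at the default 100ms (100000µs) period:

package main

import "fmt"

const (
	sharesPerCPU  = 1024
	milliCPUToCPU = 1000
	quotaPeriod   = 100000 // µs, i.e. 100ms
)

func main() {
	// Request 250m -> cpu.shares: 250 * 1024 / 1000 = 256.
	fmt.Println("shares:", 250*sharesPerCPU/milliCPUToCPU)

	// Limit 500m -> CFS quota: 500 * 100000 / 1000 = 50000µs per 100000µs
	// period, i.e. at most half of one CPU per period.
	fmt.Println("quota:", 500*quotaPeriod/milliCPUToCPU)
}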
-func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 { - hugePageLimits := map[int64]int64{} - for k, v := range resourceList { - if v1helper.IsHugePageResourceName(k) { - pageSize, _ := v1helper.HugePageSizeFromResourceName(k) - if value, exists := hugePageLimits[pageSize.Value()]; exists { - hugePageLimits[pageSize.Value()] = value + v.Value() - } else { - hugePageLimits[pageSize.Value()] = v.Value() - } - } - } - return hugePageLimits -} - -// ResourceConfigForPod takes the input pod and outputs the cgroup resource config. -func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig { - podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) - // sum requests and limits. - reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{ - // SkipPodLevelResources is set to false when PodLevelResources feature is enabled. - SkipPodLevelResources: !podLevelResourcesEnabled, - UseStatusResources: false, - }) - // track if limits were applied for each resource. - memoryLimitsDeclared := true - cpuLimitsDeclared := true - - limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{ - // SkipPodLevelResources is set to false when PodLevelResources feature is enabled. - SkipPodLevelResources: !podLevelResourcesEnabled, - ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) { - if res.Cpu().IsZero() { - cpuLimitsDeclared = false - } - if res.Memory().IsZero() { - memoryLimitsDeclared = false - } - }, - }) - - if podLevelResourcesEnabled && resource.IsPodLevelResourcesSet(allocatedPod) { - if !allocatedPod.Spec.Resources.Limits.Cpu().IsZero() { - cpuLimitsDeclared = true - } - - if !allocatedPod.Spec.Resources.Limits.Memory().IsZero() { - memoryLimitsDeclared = true - } - } - // map hugepage pagesize (bytes) to limits (bytes) - hugePageLimits := HugePageLimits(reqs) - - cpuRequests := int64(0) - cpuLimits := int64(0) - memoryLimits := int64(0) - if request, found := reqs[v1.ResourceCPU]; found { - cpuRequests = request.MilliValue() - } - if limit, found := limits[v1.ResourceCPU]; found { - cpuLimits = limit.MilliValue() - } - if limit, found := limits[v1.ResourceMemory]; found { - memoryLimits = limit.Value() - } - - // convert to CFS values - cpuShares := MilliCPUToShares(cpuRequests) - cpuQuota := MilliCPUToQuota(cpuLimits, int64(cpuPeriod)) - - // quota is not capped when cfs quota is disabled - if !enforceCPULimits { - cpuQuota = int64(-1) - } - - // determine the qos class - qosClass := v1qos.GetPodQOS(allocatedPod) - - // build the result - result := &ResourceConfig{} - if qosClass == v1.PodQOSGuaranteed { - result.CPUShares = &cpuShares - result.CPUQuota = &cpuQuota - result.CPUPeriod = &cpuPeriod - result.Memory = &memoryLimits - } else if qosClass == v1.PodQOSBurstable { - result.CPUShares = &cpuShares - if cpuLimitsDeclared { - result.CPUQuota = &cpuQuota - result.CPUPeriod = &cpuPeriod - } - if memoryLimitsDeclared { - result.Memory = &memoryLimits - } - } else { - shares := uint64(MinShares) - result.CPUShares = &shares - } - result.HugePageLimit = hugePageLimits - - if enforceMemoryQoS { - memoryMin := int64(0) - if request, found := reqs[v1.ResourceMemory]; found { - memoryMin = request.Value() - } - if memoryMin > 0 { - result.Unified = map[string]string{ - Cgroup2MemoryMin: strconv.FormatInt(memoryMin, 10), - } - } - } - - return result -} - -// getCgroupSubsystemsV1 returns information about the mounted cgroup 
v1 subsystems -func getCgroupSubsystemsV1() (*CgroupSubsystems, error) { - // get all cgroup mounts. - allCgroups, err := libcontainercgroups.GetCgroupMounts(true) - if err != nil { - return &CgroupSubsystems{}, err - } - if len(allCgroups) == 0 { - return &CgroupSubsystems{}, fmt.Errorf("failed to find cgroup mounts") - } - mountPoints := make(map[string]string, len(allCgroups)) - for _, mount := range allCgroups { - // BEFORE kubelet used a random mount point per cgroups subsystem; - // NOW more deterministic: kubelet use mount point with shortest path; - // FUTURE is bright with clear expectation determined in doc. - // ref. issue: https://github.com/kubernetes/kubernetes/issues/95488 - - for _, subsystem := range mount.Subsystems { - previous := mountPoints[subsystem] - if previous == "" || len(mount.Mountpoint) < len(previous) { - mountPoints[subsystem] = mount.Mountpoint - } - } - } - return &CgroupSubsystems{ - Mounts: allCgroups, - MountPoints: mountPoints, - }, nil -} - -// getCgroupSubsystemsV2 returns information about the enabled cgroup v2 subsystems -func getCgroupSubsystemsV2() (*CgroupSubsystems, error) { - controllers, err := libcontainercgroups.GetAllSubsystems() - if err != nil { - return nil, err - } - - mounts := []libcontainercgroups.Mount{} - mountPoints := make(map[string]string, len(controllers)) - for _, controller := range controllers { - mountPoints[controller] = util.CgroupRoot - m := libcontainercgroups.Mount{ - Mountpoint: util.CgroupRoot, - Root: util.CgroupRoot, - Subsystems: []string{controller}, - } - mounts = append(mounts, m) - } - - return &CgroupSubsystems{ - Mounts: mounts, - MountPoints: mountPoints, - }, nil -} - -// GetCgroupSubsystems returns information about the mounted cgroup subsystems -func GetCgroupSubsystems() (*CgroupSubsystems, error) { - if libcontainercgroups.IsCgroup2UnifiedMode() { - return getCgroupSubsystemsV2() - } - - return getCgroupSubsystemsV1() -} - -// getCgroupProcs takes a cgroup directory name as an argument -// reads through the cgroup's procs file and returns a list of tgid's. 
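The mount-point selection in getCgroupSubsystemsV1 above deliberately prefers the shortest path per subsystem so the choice is deterministic (see the linked issue). A standalone sketch of just that loop, with a pared-down, invented mount type standing in for libcontainercgroups.Mount:

package main

import "fmt"

// mount is a trimmed stand-in for libcontainercgroups.Mount.
type mount struct {
	Mountpoint string
	Subsystems []string
}

// shortestMountPoints keeps, for each subsystem, the mount with the
// shortest path, matching the selection rule in the deleted helper.
func shortestMountPoints(mounts []mount) map[string]string {
	mountPoints := make(map[string]string, len(mounts))
	for _, m := range mounts {
		for _, subsystem := range m.Subsystems {
			previous := mountPoints[subsystem]
			if previous == "" || len(m.Mountpoint) < len(previous) {
				mountPoints[subsystem] = m.Mountpoint
			}
		}
	}
	return mountPoints
}

func main() {
	mounts := []mount{
		{Mountpoint: "/sys/fs/cgroup/cpu,cpuacct", Subsystems: []string{"cpu", "cpuacct"}},
		{Mountpoint: "/sys/fs/cgroup/cpu", Subsystems: []string{"cpu"}},
	}
	fmt.Println(shortestMountPoints(mounts)["cpu"]) // /sys/fs/cgroup/cpu
}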
-// It returns an empty list if a procs file doesn't exists -func getCgroupProcs(dir string) ([]int, error) { - procsFile := filepath.Join(dir, "cgroup.procs") - f, err := os.Open(procsFile) - if err != nil { - if os.IsNotExist(err) { - // The procsFile does not exist, So no pids attached to this directory - return []int{}, nil - } - return nil, err - } - defer f.Close() - - s := bufio.NewScanner(f) - out := []int{} - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.Atoi(t) - if err != nil { - return nil, fmt.Errorf("unexpected line in %v; could not convert to pid: %v", procsFile, err) - } - out = append(out, pid) - } - } - return out, nil -} - -// GetPodCgroupNameSuffix returns the last element of the pod CgroupName identifier -func GetPodCgroupNameSuffix(podUID types.UID) string { - return podCgroupNamePrefix + string(podUID) -} - -// NodeAllocatableRoot returns the literal cgroup path for the node allocatable cgroup -func NodeAllocatableRoot(cgroupRoot string, cgroupsPerQOS bool, cgroupDriver string) string { - nodeAllocatableRoot := ParseCgroupfsToCgroupName(cgroupRoot) - if cgroupsPerQOS { - nodeAllocatableRoot = NewCgroupName(nodeAllocatableRoot, defaultNodeAllocatableCgroupName) - } - if cgroupDriver == "systemd" { - return nodeAllocatableRoot.ToSystemd() - } - return nodeAllocatableRoot.ToCgroupfs() -} - -// GetKubeletContainer returns the cgroup the kubelet will use -func GetKubeletContainer(kubeletCgroups string) (string, error) { - if kubeletCgroups == "" { - cont, err := getContainer(os.Getpid()) - if err != nil { - return "", err - } - return cont, nil - } - return kubeletCgroups, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go deleted file mode 100644 index 34d4ee59d12..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build !linux -// +build !linux - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" -) - -const ( - MinShares = 0 - MaxShares = 0 - - SharesPerCPU = 0 - MilliCPUToCPU = 0 - - QuotaPeriod = 0 - MinQuotaPeriod = 0 - MinMilliCPULimit = 0 -) - -// MilliCPUToQuota converts milliCPU and period to CFS quota values. -func MilliCPUToQuota(milliCPU, period int64) int64 { - return 0 -} - -// MilliCPUToShares converts the milliCPU to CFS shares. -func MilliCPUToShares(milliCPU int64) uint64 { - return 0 -} - -// ResourceConfigForPod takes the input pod and outputs the cgroup resource config. 
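The getCgroupProcs helper above couples cgroupfs access with line parsing; the parsing half can be exercised on its own against any reader. A minimal sketch under that assumption (the function name and sample input are invented):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// parseProcs applies the same line-by-line parsing as the deleted
// getCgroupProcs, but over any reader so it can be tested without a
// mounted cgroupfs.
func parseProcs(r io.Reader) ([]int, error) {
	s := bufio.NewScanner(r)
	out := []int{}
	for s.Scan() {
		if t := s.Text(); t != "" {
			pid, err := strconv.Atoi(t)
			if err != nil {
				return nil, fmt.Errorf("could not convert %q to pid: %w", t, err)
			}
			out = append(out, pid)
		}
	}
	return out, s.Err()
}

func main() {
	pids, err := parseProcs(strings.NewReader("1423\n1550\n"))
	fmt.Println(pids, err) // [1423 1550] <nil>
}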
-func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig { - return nil -} - -// GetCgroupSubsystems returns information about the mounted cgroup subsystems -func GetCgroupSubsystems() (*CgroupSubsystems, error) { - return nil, nil -} - -func getCgroupProcs(dir string) ([]int, error) { - return nil, nil -} - -// GetPodCgroupNameSuffix returns the last element of the pod CgroupName identifier -func GetPodCgroupNameSuffix(podUID types.UID) string { - return "" -} - -// NodeAllocatableRoot returns the literal cgroup path for the node allocatable cgroup -func NodeAllocatableRoot(cgroupRoot string, cgroupsPerQOS bool, cgroupDriver string) string { - return "" -} - -// GetKubeletContainer returns the cgroup the kubelet will use -func GetKubeletContainer(kubeletCgroups string) (string, error) { - return "", nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle.go deleted file mode 100644 index 5e50fd16662..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" -) - -type InternalContainerLifecycle interface { - PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error - PreStartContainer(pod *v1.Pod, container *v1.Container, containerID string) error - PostStopContainer(containerID string) error -} - -// Implements InternalContainerLifecycle interface. -type internalContainerLifecycleImpl struct { - cpuManager cpumanager.Manager - memoryManager memorymanager.Manager - topologyManager topologymanager.Manager -} - -func (i *internalContainerLifecycleImpl) PreStartContainer(pod *v1.Pod, container *v1.Container, containerID string) error { - if i.cpuManager != nil { - i.cpuManager.AddContainer(pod, container, containerID) - } - - if i.memoryManager != nil { - i.memoryManager.AddContainer(pod, container, containerID) - } - - i.topologyManager.AddContainer(pod, container, containerID) - - return nil -} - -func (i *internalContainerLifecycleImpl) PostStopContainer(containerID string) error { - return i.topologyManager.RemoveContainer(containerID) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_linux.go deleted file mode 100644 index 0c3bb2e4999..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_linux.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build linux -// +build linux - -/* -Copyright 2021 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "strconv" - "strings" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" -) - -func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { - if i.cpuManager != nil { - allocatedCPUs := i.cpuManager.GetCPUAffinity(string(pod.UID), container.Name) - if !allocatedCPUs.IsEmpty() { - containerConfig.Linux.Resources.CpusetCpus = allocatedCPUs.String() - } - } - - if i.memoryManager != nil { - numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container) - if numaNodes.Len() > 0 { - var affinity []string - for _, numaNode := range sets.List(numaNodes) { - affinity = append(affinity, strconv.Itoa(numaNode)) - } - containerConfig.Linux.Resources.CpusetMems = strings.Join(affinity, ",") - } - } - - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_unsupported.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_unsupported.go deleted file mode 100644 index 5028c40bbbc..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_unsupported.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build !linux && !windows -// +build !linux,!windows - -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" -) - -func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_windows.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_windows.go deleted file mode 100644 index 939658d846f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle_windows.go +++ /dev/null @@ -1,141 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/klog/v2" - kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/winstats" - "k8s.io/utils/cpuset" -) - -func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { - if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WindowsCPUAndMemoryAffinity) { - return nil - } - - klog.V(4).Info("PreCreateContainer for Windows") - - // retrieve CPU and NUMA affinity from CPU Manager and Memory Manager (if enabled) - var allocatedCPUs cpuset.CPUSet - if i.cpuManager != nil { - allocatedCPUs = i.cpuManager.GetCPUAffinity(string(pod.UID), container.Name) - } - - var numaNodes sets.Set[int] - if i.memoryManager != nil { - numaNodes = i.memoryManager.GetMemoryNUMANodes(pod, container) - } - - // Gather all CPUs associated with the selected NUMA nodes - var allNumaNodeCPUs []winstats.GroupAffinity - for _, numaNode := range sets.List(numaNodes) { - affinity, err := winstats.GetCPUsforNUMANode(uint16(numaNode)) - if err != nil { - return fmt.Errorf("failed to get CPUs for NUMA node %d: %v", numaNode, err) - } - allNumaNodeCPUs = append(allNumaNodeCPUs, *affinity) - } - - var finalCPUSet = computeFinalCpuSet(allocatedCPUs, allNumaNodeCPUs) - - klog.V(4).InfoS("Setting CPU affinity", "affinity", finalCPUSet, "container", container.Name, "pod", pod.UID) - - // Set CPU group affinities in the container config - if finalCPUSet != nil { - var cpusToGroupAffinities []*runtimeapi.WindowsCpuGroupAffinity - for group, mask := range groupMasks(finalCPUSet) { - - cpusToGroupAffinities = append(cpusToGroupAffinities, &runtimeapi.WindowsCpuGroupAffinity{ - CpuGroup: uint32(group), - CpuMask: uint64(mask), - }) - } - containerConfig.Windows.Resources.AffinityCpus = cpusToGroupAffinities - } - - // return nil if no CPUs were selected - return nil -} - -// computeFinalCpuSet determines the final set of CPUs to use based on the CPU and memory managers -// and is extracted so that it can be tested -func computeFinalCpuSet(allocatedCPUs cpuset.CPUSet, allNumaNodeCPUs []winstats.GroupAffinity) sets.Set[int] { - if !allocatedCPUs.IsEmpty() && len(allNumaNodeCPUs) > 0 { - // Both CPU and memory managers are enabled - - numaNodeAffinityCPUSet := computeCPUSet(allNumaNodeCPUs) - cpuManagerAffinityCPUSet := sets.New[int](allocatedCPUs.List()...) 
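For the case analysis that follows, it helps to keep the Windows processor-group encoding in mind: computeCPUSet and groupMasks below translate between flat CPU IDs and (group, 64-bit mask) pairs, where a CPU ID flattens to group*64 + bit. A self-contained round trip of that encoding (helper names are invented for illustration):

package main

import "fmt"

// cpusFromMask expands one (group, mask) pair into flat CPU IDs, the
// direction taken by the deleted computeCPUSet.
func cpusFromMask(group int, mask uint64) []int {
	var cpus []int
	for i := 0; i < 64; i++ {
		if (mask>>i)&1 == 1 {
			cpus = append(cpus, group*64+i)
		}
	}
	return cpus
}

// masksFromCPUs folds flat CPU IDs back into per-group masks, the
// direction taken by the deleted groupMasks.
func masksFromCPUs(cpus []int) map[int]uint64 {
	masks := make(map[int]uint64)
	for _, cpu := range cpus {
		masks[cpu/64] |= uint64(1) << (cpu % 64)
	}
	return masks
}

func main() {
	masks := masksFromCPUs([]int{0, 1, 65}) // CPUs 0,1 in group 0; CPU 65 is bit 1 of group 1
	fmt.Println(masks)                      // map[0:3 1:2]
	fmt.Println(cpusFromMask(1, masks[1]))  // [65]
}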
- - // Determine which set of CPUs to use using the following logic outlined in the KEP: - // Case 1: CPU manager selects more CPUs than those available in the NUMA nodes selected by the memory manager - // Case 2: CPU manager selects fewer CPUs, and they all fall within the CPUs available in the NUMA nodes selected by the memory manager - // Case 3: CPU manager selects fewer CPUs, but some are outside of the CPUs available in the NUMA nodes selected by the memory manager - - if cpuManagerAffinityCPUSet.Len() > numaNodeAffinityCPUSet.Len() { - // Case 1, use CPU manager selected CPUs - return cpuManagerAffinityCPUSet - } else if numaNodeAffinityCPUSet.IsSuperset(cpuManagerAffinityCPUSet) { - // Case 2, use CPU manager selected CPUs - return cpuManagerAffinityCPUSet - } else { - // Case 3, merge CPU manager and memory manager selected CPUs - return cpuManagerAffinityCPUSet.Union(numaNodeAffinityCPUSet) - } - } else if !allocatedCPUs.IsEmpty() { - // Only CPU manager is enabled, use CPU manager selected CPUs - return sets.New[int](allocatedCPUs.List()...) - } else if len(allNumaNodeCPUs) > 0 { - // Only memory manager is enabled, use CPUs associated with selected NUMA nodes - return computeCPUSet(allNumaNodeCPUs) - } - return nil -} - -// computeCPUSet converts a list of GroupAffinity to a set of CPU IDs -func computeCPUSet(affinities []winstats.GroupAffinity) sets.Set[int] { - cpuSet := sets.New[int]() - for _, affinity := range affinities { - for i := 0; i < 64; i++ { - if (affinity.Mask>>i)&1 == 1 { - cpuID := int(affinity.Group)*64 + i - cpuSet.Insert(cpuID) - } - } - } - return cpuSet -} - -// groupMasks converts a set of CPU IDs into group and mask representations -func groupMasks(cpuSet sets.Set[int]) map[int]uint64 { - groupMasks := make(map[int]uint64) - for cpu := range cpuSet { - group := cpu / 64 - mask := uint64(1) << (cpu % 64) - groupMasks[group] |= mask - } - return groupMasks -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/fake_memory_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/fake_memory_manager.go deleted file mode 100644 index 46874e50050..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/fake_memory_manager.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package memorymanager - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/config" - "k8s.io/kubernetes/pkg/kubelet/status" -) - -type fakeManager struct { - state state.State -} - -func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error { - klog.InfoS("Start()") - return nil -} - -func (m *fakeManager) Policy() Policy { - klog.InfoS("Policy()") - return NewPolicyNone() -} - -func (m *fakeManager) Allocate(pod *v1.Pod, container *v1.Container) error { - klog.InfoS("Allocate", "pod", klog.KObj(pod), "containerName", container.Name) - return nil -} - -func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { - klog.InfoS("Add container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID) -} - -func (m *fakeManager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] { - klog.InfoS("Get MemoryNUMANodes", "pod", klog.KObj(pod), "containerName", container.Name) - return nil -} - -func (m *fakeManager) RemoveContainer(containerID string) error { - klog.InfoS("RemoveContainer", "containerID", containerID) - return nil -} - -func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - klog.InfoS("Get Topology Hints", "pod", klog.KObj(pod), "containerName", container.Name) - return map[string][]topologymanager.TopologyHint{} -} - -func (m *fakeManager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint { - klog.InfoS("Get Pod Topology Hints", "pod", klog.KObj(pod)) - return map[string][]topologymanager.TopologyHint{} -} - -func (m *fakeManager) State() state.Reader { - return m.state -} - -// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node -func (m *fakeManager) GetAllocatableMemory() []state.Block { - klog.InfoS("Get Allocatable Memory") - return []state.Block{} -} - -// GetMemory returns the memory allocated by a container from NUMA nodes -func (m *fakeManager) GetMemory(podUID, containerName string) []state.Block { - klog.InfoS("Get Memory", "podUID", podUID, "containerName", containerName) - return []state.Block{} -} - -// NewFakeManager creates empty/fake memory manager -func NewFakeManager() Manager { - return &fakeManager{ - state: state.NewMemoryState(), - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/memory_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/memory_manager.go deleted file mode 100644 index 448539fc0cd..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/memory_manager.go +++ /dev/null @@ -1,468 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package memorymanager - -import ( - "context" - "fmt" - "runtime" - "sync" - - cadvisorapi "github.com/google/cadvisor/info/v1" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - corev1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" - "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/config" - "k8s.io/kubernetes/pkg/kubelet/status" -) - -// memoryManagerStateFileName is the file name where memory manager stores its state -const memoryManagerStateFileName = "memory_manager_state" - -// ActivePodsFunc is a function that returns a list of active pods -type ActivePodsFunc func() []*v1.Pod - -type runtimeService interface { - UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error -} - -type sourcesReadyStub struct{} - -func (s *sourcesReadyStub) AddSource(source string) {} -func (s *sourcesReadyStub) AllReady() bool { return true } - -// Manager interface provides methods for Kubelet to manage pod memory. -type Manager interface { - // Start is called during Kubelet initialization. - Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error - - // AddContainer adds the mapping between container ID to pod UID and the container name - // The mapping used to remove the memory allocation during the container removal - AddContainer(p *v1.Pod, c *v1.Container, containerID string) - - // Allocate is called to pre-allocate memory resources during Pod admission. - // This must be called at some point prior to the AddContainer() call for a container, e.g. at pod admission time. - Allocate(pod *v1.Pod, container *v1.Container) error - - // RemoveContainer is called after Kubelet decides to kill or delete a - // container. After this call, any memory allocated to the container is freed. - RemoveContainer(containerID string) error - - // State returns a read-only interface to the internal memory manager state. - State() state.Reader - - // GetTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment among this - // and other resource controllers. - GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint - - // GetPodTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment among this - // and other resource controllers. 
- GetPodTopologyHints(*v1.Pod) map[string][]topologymanager.TopologyHint - - // GetMemoryNUMANodes provides NUMA nodes that are used to allocate the container memory - GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] - - // GetAllocatableMemory returns the amount of allocatable memory for each NUMA node - GetAllocatableMemory() []state.Block - - // GetMemory returns the memory allocated by a container from NUMA nodes - GetMemory(podUID, containerName string) []state.Block -} - -type manager struct { - sync.Mutex - policy Policy - - // state allows to restore information regarding memory allocation for guaranteed pods - // in the case of the kubelet restart - state state.State - - // containerRuntime is the container runtime service interface needed - // to make UpdateContainerResources() calls against the containers. - containerRuntime runtimeService - - // activePods is a method for listing active pods on the node - // so all the containers can be updated during call to the removeStaleState. - activePods ActivePodsFunc - - // podStatusProvider provides a method for obtaining pod statuses - // and the containerID of their containers - podStatusProvider status.PodStatusProvider - - // containerMap provides a mapping from (pod, container) -> containerID - // for all containers a pod - containerMap containermap.ContainerMap - - // sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. - // We use it to determine when we can purge inactive pods from checkpointed state. - sourcesReady config.SourcesReady - - // stateFileDirectory holds the directory where the state file for checkpoints is held. - stateFileDirectory string - - // allocatableMemory holds the allocatable memory for each NUMA node - allocatableMemory []state.Block -} - -var _ Manager = &manager{} - -// NewManager returns new instance of the memory manager -func NewManager(policyName string, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory []kubeletconfig.MemoryReservation, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) { - var policy Policy - - switch policyType(policyName) { - - case policyTypeNone: - policy = NewPolicyNone() - - case policyTypeStatic: - if runtime.GOOS == "windows" { - return nil, fmt.Errorf("policy %q is not available on Windows", policyTypeStatic) - } - - systemReserved, err := getSystemReservedMemory(machineInfo, nodeAllocatableReservation, reservedMemory) - if err != nil { - return nil, err - } - - policy, err = NewPolicyStatic(machineInfo, systemReserved, affinity) - if err != nil { - return nil, err - } - - case policyTypeBestEffort: - if runtime.GOOS == "windows" { - systemReserved, err := getSystemReservedMemory(machineInfo, nodeAllocatableReservation, reservedMemory) - if err != nil { - return nil, err - } - policy, err = NewPolicyBestEffort(machineInfo, systemReserved, affinity) - if err != nil { - return nil, err - } - } else { - return nil, fmt.Errorf("policy %q is not available for platform %q", policyTypeBestEffort, runtime.GOOS) - } - - default: - return nil, fmt.Errorf("unknown policy: %q", policyName) - } - - manager := &manager{ - policy: policy, - stateFileDirectory: stateFileDirectory, - } - manager.sourcesReady = &sourcesReadyStub{} - return manager, nil -} - -// Start starts the memory manager under the kubelet and calls policy start -func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider 
status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error { - klog.InfoS("Starting memorymanager", "policy", m.policy.Name()) - m.sourcesReady = sourcesReady - m.activePods = activePods - m.podStatusProvider = podStatusProvider - m.containerRuntime = containerRuntime - m.containerMap = initialContainers - - stateImpl, err := state.NewCheckpointState(m.stateFileDirectory, memoryManagerStateFileName, m.policy.Name()) - if err != nil { - klog.ErrorS(err, "Could not initialize checkpoint manager, please drain node and remove policy state file") - return err - } - m.state = stateImpl - - err = m.policy.Start(m.state) - if err != nil { - klog.ErrorS(err, "Policy start error") - return err - } - - m.allocatableMemory = m.policy.GetAllocatableMemory(m.state) - - klog.V(4).InfoS("memorymanager started", "policy", m.policy.Name()) - return nil -} - -// AddContainer saves the value of requested memory for the guaranteed pod under the state and sets memory affinity according to the topology manager -func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { - m.Lock() - defer m.Unlock() - - m.containerMap.Add(string(pod.UID), container.Name, containerID) - - // Since we know that each init container always runs to completion before - // the next container starts, we can safely remove references to any previously - // started init containers. This will free up the memory from these init containers - // for use in other pods. If the current container happens to be an init container, - // we skip deletion of it until the next container is added, and this is called again. - for _, initContainer := range pod.Spec.InitContainers { - if initContainer.Name == container.Name { - break - } - - // Since a restartable init container remains running for the full - // duration of the pod's lifecycle, we should not remove it from the - // memory manager state. - if podutil.IsRestartableInitContainer(&initContainer) { - continue - } - - m.policyRemoveContainerByRef(string(pod.UID), initContainer.Name) - } -} - -// GetMemoryNUMANodes provides the NUMA nodes that are used to allocate the container memory -func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] { - // Get NUMA node affinity of blocks assigned to the container during Allocate() - numaNodes := sets.New[int]() - for _, block := range m.state.GetMemoryBlocks(string(pod.UID), container.Name) { - for _, nodeID := range block.NUMAAffinity { - // avoid node duplication when hugepages and memory blocks are pinned to the same NUMA node - numaNodes.Insert(nodeID) - } - } - - if numaNodes.Len() == 0 { - klog.V(5).InfoS("NUMA nodes not available for allocation", "pod", klog.KObj(pod), "containerName", container.Name) - return nil - } - - klog.InfoS("Memory affinity", "pod", klog.KObj(pod), "containerName", container.Name, "numaNodes", numaNodes) - return numaNodes -} - -// Allocate is called to pre-allocate memory resources during Pod admission. -func (m *manager) Allocate(pod *v1.Pod, container *v1.Container) error { - // Garbage collect any stranded resources before allocation - m.removeStaleState() - - m.Lock() - defer m.Unlock() - - // Call down into the policy to assign this container memory if required.
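The init-container walk in AddContainer above is easy to get wrong, so here is a minimal sketch of the same release rule in isolation: every init container that ran before the one now starting frees its memory-manager state, except restartable (sidecar-style) init containers. The type and container names are invented for illustration:

package main

import "fmt"

type initCtr struct {
	name        string
	restartable bool // sidecar-style init containers keep running
}

// releasableBefore lists the earlier, non-restartable init containers
// whose memory-manager state can be dropped when `starting` is added.
func releasableBefore(inits []initCtr, starting string) []string {
	var out []string
	for _, ic := range inits {
		if ic.name == starting {
			break
		}
		if ic.restartable {
			continue
		}
		out = append(out, ic.name)
	}
	return out
}

func main() {
	inits := []initCtr{{"migrate", false}, {"proxy", true}, {"warmup", false}}
	// proxy keeps running, so only migrate's memory is reusable here.
	fmt.Println(releasableBefore(inits, "warmup")) // [migrate]
}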
- if err := m.policy.Allocate(m.state, pod, container); err != nil { - klog.ErrorS(err, "Allocate error", "pod", klog.KObj(pod), "containerName", container.Name) - return err - } - return nil -} - -// RemoveContainer removes the container from the state -func (m *manager) RemoveContainer(containerID string) error { - m.Lock() - defer m.Unlock() - - // if error appears it means container entry already does not exist under the container map - podUID, containerName, err := m.containerMap.GetContainerRef(containerID) - if err != nil { - klog.ErrorS(err, "Failed to get container from container map", "containerID", containerID) - return nil - } - - m.policyRemoveContainerByRef(podUID, containerName) - - return nil -} - -// State returns the state of the manager -func (m *manager) State() state.Reader { - return m.state -} - -// GetPodTopologyHints returns the topology hints for the topology manager -func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint { - // Garbage collect any stranded resources before providing TopologyHints - m.removeStaleState() - // Delegate to active policy - return m.policy.GetPodTopologyHints(m.state, pod) -} - -// GetTopologyHints returns the topology hints for the topology manager -func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - // Garbage collect any stranded resources before providing TopologyHints - m.removeStaleState() - // Delegate to active policy - return m.policy.GetTopologyHints(m.state, pod, container) -} - -// TODO: move the method to the upper level, to re-use it under the CPU and memory managers -func (m *manager) removeStaleState() { - // Only once all sources are ready do we attempt to remove any stale state. - // This ensures that the call to `m.activePods()` below will succeed with - // the actual active pods list. - if !m.sourcesReady.AllReady() { - return - } - - // We grab the lock to ensure that no new containers will grab memory block while - // executing the code below. Without this lock, its possible that we end up - // removing state that is newly added by an asynchronous call to - // AddContainer() during the execution of this code. - m.Lock() - defer m.Unlock() - - // Get the list of active pods. - activePods := m.activePods() - - // Build a list of (podUID, containerName) pairs for all containers in all active Pods. - activeContainers := make(map[string]map[string]struct{}) - for _, pod := range activePods { - activeContainers[string(pod.UID)] = make(map[string]struct{}) - for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) { - activeContainers[string(pod.UID)][container.Name] = struct{}{} - } - } - - // Loop through the MemoryManager state. Remove any state for containers not - // in the `activeContainers` list built above. 
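The activeContainers index built in removeStaleState above is just a two-level set, podUID -> containerName -> presence, so stale (pod, container) assignments can be detected with two map lookups. A small sketch of the same shape (all names local to the sketch):

package main

import "fmt"

// activeSet mirrors the structure built in removeStaleState.
func activeSet(pods map[string][]string) map[string]map[string]struct{} {
	out := make(map[string]map[string]struct{})
	for uid, containers := range pods {
		out[uid] = make(map[string]struct{})
		for _, name := range containers {
			out[uid][name] = struct{}{}
		}
	}
	return out
}

func main() {
	active := activeSet(map[string][]string{"uid-1": {"init", "app"}})
	_, ok := active["uid-1"]["app"]
	fmt.Println(ok) // true: this assignment is kept
	_, ok = active["uid-2"]["app"]
	fmt.Println(ok) // false: state for uid-2/app would be removed
}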
- assignments := m.state.GetMemoryAssignments() - for podUID := range assignments { - for containerName := range assignments[podUID] { - if _, ok := activeContainers[podUID][containerName]; !ok { - klog.V(2).InfoS("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName) - m.policyRemoveContainerByRef(podUID, containerName) - } - } - } - - m.containerMap.Visit(func(podUID, containerName, containerID string) { - if _, ok := activeContainers[podUID][containerName]; !ok { - klog.V(2).InfoS("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName) - m.policyRemoveContainerByRef(podUID, containerName) - } - }) -} - -func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) { - m.policy.RemoveContainer(m.state, podUID, containerName) - m.containerMap.RemoveByContainerRef(podUID, containerName) -} - -func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory []kubeletconfig.MemoryReservation) (map[v1.ResourceName]resource.Quantity, error) { - totalMemoryType := map[v1.ResourceName]resource.Quantity{} - - numaNodes := map[int]bool{} - for _, numaNode := range machineInfo.Topology { - numaNodes[numaNode.Id] = true - } - - for _, reservation := range reservedMemory { - if !numaNodes[int(reservation.NumaNode)] { - return nil, fmt.Errorf("the reserved memory configuration references a NUMA node %d that does not exist on this machine", reservation.NumaNode) - } - - for resourceName, q := range reservation.Limits { - if value, ok := totalMemoryType[resourceName]; ok { - q.Add(value) - } - totalMemoryType[resourceName] = q - } - } - - return totalMemoryType, nil -} - -func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory []kubeletconfig.MemoryReservation) error { - totalMemoryType, err := getTotalMemoryTypeReserved(machineInfo, reservedMemory) - if err != nil { - return err - } - - commonMemoryTypeSet := make(map[v1.ResourceName]bool) - for resourceType := range totalMemoryType { - commonMemoryTypeSet[resourceType] = true - } - - for resourceType := range nodeAllocatableReservation { - if !(corev1helper.IsHugePageResourceName(resourceType) || resourceType == v1.ResourceMemory) { - continue - } - commonMemoryTypeSet[resourceType] = true - } - - for resourceType := range commonMemoryTypeSet { - nodeAllocatableMemory := resource.NewQuantity(0, resource.DecimalSI) - if memValue, set := nodeAllocatableReservation[resourceType]; set { - nodeAllocatableMemory.Add(memValue) - } - - reservedMemory := resource.NewQuantity(0, resource.DecimalSI) - if memValue, set := totalMemoryType[resourceType]; set { - reservedMemory.Add(memValue) - } - - if !(*nodeAllocatableMemory).Equal(*reservedMemory) { - return fmt.Errorf("the total amount %q of type %q is not equal to the value %q determined by Node Allocatable feature", reservedMemory.String(), resourceType, nodeAllocatableMemory.String()) - } - } - - return nil -} - -func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory []kubeletconfig.MemoryReservation) (systemReservedMemory, error) { - reservedMemoryConverted := make(map[int]map[v1.ResourceName]uint64) - for _, node := range machineInfo.Topology { - reservedMemoryConverted[node.Id] = make(map[v1.ResourceName]uint64) - } - - for _, reservation := range reservedMemory { - for resourceName, q := range reservation.Limits { - val, success := q.AsInt64() - if !success { - return nil, fmt.Errorf("could not covert a variable of type 
Quantity to int64") - } - reservedMemoryConverted[int(reservation.NumaNode)][resourceName] = uint64(val) - } - } - - return reservedMemoryConverted, nil -} - -func getSystemReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory []kubeletconfig.MemoryReservation) (systemReservedMemory, error) { - if err := validateReservedMemory(machineInfo, nodeAllocatableReservation, reservedMemory); err != nil { - return nil, err - } - - reservedMemoryConverted, err := convertReserved(machineInfo, reservedMemory) - if err != nil { - return nil, err - } - - return reservedMemoryConverted, nil -} - -// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node -func (m *manager) GetAllocatableMemory() []state.Block { - return m.allocatableMemory -} - -// GetMemory returns the memory allocated by a container from NUMA nodes -func (m *manager) GetMemory(podUID, containerName string) []state.Block { - return m.state.GetMemoryBlocks(podUID, containerName) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy.go deleted file mode 100644 index a65a90dca98..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package memorymanager - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" -) - -// Type defines the policy type -type policyType string - -// Policy implements logic for pod container to a memory assignment. -type Policy interface { - Name() string - Start(s state.State) error - // Allocate call is idempotent - Allocate(s state.State, pod *v1.Pod, container *v1.Container) error - // RemoveContainer call is idempotent - RemoveContainer(s state.State, podUID string, containerName string) - // GetTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment among this - // and other resource controllers. - GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint - // GetPodTopologyHints implements the topologymanager.HintProvider Interface - // and is consulted to achieve NUMA aware resource alignment among this - // and other resource controllers. 
- GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint - // GetAllocatableMemory returns the amount of allocatable memory for each NUMA node - GetAllocatableMemory(s state.State) []state.Block -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_best_effort.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_best_effort.go deleted file mode 100644 index 2a2eabc8263..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_best_effort.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package memorymanager - -import ( - cadvisorapi "github.com/google/cadvisor/info/v1" - - v1 "k8s.io/api/core/v1" - - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" -) - -// On Windows we want to use the same logic as the StaticPolicy to compute the memory topology hints, -// but unlike Linux-based systems, NUMA nodes on Windows cannot be directly assigned or guaranteed via Windows APIs -// (the Windows scheduler will use the NUMA node closest to the assigned CPU, thereby respecting the NUMA node assignment as a best effort). Because of this we don't want users to specify "StaticPolicy" for the memory manager -// policy via the kubelet configuration. Instead we want to use the "BestEffort" policy, which will use the same logic as the StaticPolicy -// and in doing so will reduce code duplication.
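The BestEffort policy that follows is pure delegation around staticPolicy. A minimal sketch of the same wrapping pattern with trimmed, invented types; only Name() differs, so the two policies stay distinguishable in configuration and logs:

package main

import "fmt"

// policy is a trimmed stand-in for the memory manager Policy interface.
type policy interface {
	Name() string
	Allocate(container string) error
}

type static struct{}

func (s *static) Name() string { return "Static" }
func (s *static) Allocate(container string) error {
	fmt.Println("static allocate:", container)
	return nil
}

// bestEffort wraps static and forwards every call, mirroring how the
// deleted bestEffortPolicy reuses staticPolicy on Windows.
type bestEffort struct {
	static policy
}

func (p *bestEffort) Name() string                    { return "BestEffort" }
func (p *bestEffort) Allocate(container string) error { return p.static.Allocate(container) }

func main() {
	var p policy = &bestEffort{static: &static{}}
	fmt.Println(p.Name()) // BestEffort
	_ = p.Allocate("app") // static allocate: app
}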
-const policyTypeBestEffort policyType = "BestEffort" - -// bestEffortPolicy is implementation of the policy interface for the BestEffort policy -type bestEffortPolicy struct { - static *staticPolicy -} - -var _ Policy = &bestEffortPolicy{} - -func NewPolicyBestEffort(machineInfo *cadvisorapi.MachineInfo, reserved systemReservedMemory, affinity topologymanager.Store) (Policy, error) { - p, err := NewPolicyStatic(machineInfo, reserved, affinity) - - if err != nil { - return nil, err - } - - return &bestEffortPolicy{ - static: p.(*staticPolicy), - }, nil -} - -func (p *bestEffortPolicy) Name() string { - return string(policyTypeBestEffort) -} - -func (p *bestEffortPolicy) Start(s state.State) error { - return p.static.Start(s) -} - -func (p *bestEffortPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) { - return p.static.Allocate(s, pod, container) -} - -func (p *bestEffortPolicy) RemoveContainer(s state.State, podUID string, containerName string) { - p.static.RemoveContainer(s, podUID, containerName) -} - -func (p *bestEffortPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint { - return p.static.GetPodTopologyHints(s, pod) -} - -func (p *bestEffortPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - return p.static.GetTopologyHints(s, pod, container) -} - -func (p *bestEffortPolicy) GetAllocatableMemory(s state.State) []state.Block { - return p.static.GetAllocatableMemory(s) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_none.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_none.go deleted file mode 100644 index 5bb52db7ed4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_none.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package memorymanager - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" -) - -const policyTypeNone policyType = "None" - -// none is implementation of the policy interface for the none policy, using none -// policy is the same as disable memory management -type none struct{} - -var _ Policy = &none{} - -// NewPolicyNone returns new none policy instance -func NewPolicyNone() Policy { - return &none{} -} - -func (p *none) Name() string { - return string(policyTypeNone) -} - -func (p *none) Start(s state.State) error { - return nil -} - -// Allocate call is idempotent -func (p *none) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error { - return nil -} - -// RemoveContainer call is idempotent -func (p *none) RemoveContainer(s state.State, podUID string, containerName string) { -} - -// GetTopologyHints implements the topologymanager.HintProvider Interface -// and is consulted to achieve NUMA aware resource alignment among this -// and other resource controllers. 
-func (p *none) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint { - return nil -} - -// GetPodTopologyHints implements the topologymanager.HintProvider Interface -// and is consulted to achieve NUMA aware resource alignment among this -// and other resource controllers. -func (p *none) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint { - return nil -} - -// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node -func (p *none) GetAllocatableMemory(s state.State) []state.Block { - return []state.Block{} -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_static.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_static.go deleted file mode 100644 index eabbab23649..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_static.go +++ /dev/null @@ -1,1066 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package memorymanager - -import ( - "fmt" - "sort" - - cadvisorapi "github.com/google/cadvisor/info/v1" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - corehelper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" - "k8s.io/kubernetes/pkg/kubelet/metrics" -) - -const policyTypeStatic policyType = "Static" - -type systemReservedMemory map[int]map[v1.ResourceName]uint64 -type reusableMemory map[string]map[string]map[v1.ResourceName]uint64 - -// staticPolicy is implementation of the policy interface for the static policy -type staticPolicy struct { - // machineInfo contains machine memory related information - machineInfo *cadvisorapi.MachineInfo - // reserved contains memory that reserved for kube - systemReserved systemReservedMemory - // topology manager reference to get container Topology affinity - affinity topologymanager.Store - // initContainersReusableMemory contains the memory allocated for init - // containers that can be reused. - // Note that the restartable init container memory is not included here, - // because it is not reusable. 
- initContainersReusableMemory reusableMemory -} - -var _ Policy = &staticPolicy{} - -// NewPolicyStatic returns new static policy instance -func NewPolicyStatic(machineInfo *cadvisorapi.MachineInfo, reserved systemReservedMemory, affinity topologymanager.Store) (Policy, error) { - var totalSystemReserved uint64 - for _, node := range reserved { - if _, ok := node[v1.ResourceMemory]; !ok { - continue - } - totalSystemReserved += node[v1.ResourceMemory] - } - - // check if we have some reserved memory for the system - if totalSystemReserved <= 0 { - return nil, fmt.Errorf("[memorymanager] you should specify the system reserved memory") - } - - return &staticPolicy{ - machineInfo: machineInfo, - systemReserved: reserved, - affinity: affinity, - initContainersReusableMemory: reusableMemory{}, - }, nil -} - -func (p *staticPolicy) Name() string { - return string(policyTypeStatic) -} - -func (p *staticPolicy) Start(s state.State) error { - if err := p.validateState(s); err != nil { - klog.ErrorS(err, "Invalid state, please drain node and remove policy state file") - return err - } - return nil -} - -// Allocate call is idempotent -func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) { - // allocate the memory only for guaranteed pods - qos := v1qos.GetPodQOS(pod) - if qos != v1.PodQOSGuaranteed { - klog.V(5).InfoS("Exclusive memory allocation skipped, pod QoS is not guaranteed", "pod", klog.KObj(pod), "containerName", container.Name, "qos", qos) - return nil - } - - podUID := string(pod.UID) - klog.InfoS("Allocate", "pod", klog.KObj(pod), "containerName", container.Name) - // container belongs in an exclusively allocated pool - metrics.MemoryManagerPinningRequestTotal.Inc() - defer func() { - if rerr != nil { - metrics.MemoryManagerPinningErrorsTotal.Inc() - } - }() - if blocks := s.GetMemoryBlocks(podUID, container.Name); blocks != nil { - p.updatePodReusableMemory(pod, container, blocks) - - klog.InfoS("Container already present in state, skipping", "pod", klog.KObj(pod), "containerName", container.Name) - return nil - } - - // Call Topology Manager to get the aligned affinity across all hint providers. 
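In the Allocate flow that follows, the hint's NUMANodeAffinity is a bitmask over NUMA node IDs, later flattened back to a []int (via GetBits) for Block.NUMAAffinity. A self-contained sketch of that mask/bits round trip, with a local, invented stand-in for the vendored bitmask type:

package main

import "fmt"

// nodeMask is a minimal stand-in for the topologymanager bitmask:
// bit N set means NUMA node N is part of the affinity.
type nodeMask uint64

func newMask(nodes ...int) nodeMask {
	var m nodeMask
	for _, n := range nodes {
		m |= 1 << uint(n)
	}
	return m
}

// bits recovers the node IDs from the mask in ascending order, the
// same shape Allocate uses for Block.NUMAAffinity.
func (m nodeMask) bits() []int {
	var out []int
	for n := 0; n < 64; n++ {
		if m&(1<<uint(n)) != 0 {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	hint := newMask(0, 1)    // affinity across NUMA nodes 0 and 1
	fmt.Println(hint.bits()) // [0 1]
}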
-	hint := p.affinity.GetAffinity(podUID, container.Name)
-	klog.InfoS("Got topology affinity", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "hint", hint)
-
-	requestedResources, err := getRequestedResources(pod, container)
-	if err != nil {
-		return err
-	}
-
-	machineState := s.GetMachineState()
-	bestHint := &hint
-	// topology manager returned the hint with NUMA affinity nil
-	// we should use the default NUMA affinity calculated the same way as for the topology manager
-	if hint.NUMANodeAffinity == nil {
-		defaultHint, err := p.getDefaultHint(machineState, pod, requestedResources)
-		if err != nil {
-			return err
-		}
-
-		if !defaultHint.Preferred && bestHint.Preferred {
-			return fmt.Errorf("[memorymanager] failed to find the default preferred hint")
-		}
-		bestHint = defaultHint
-	}
-
-	// the topology manager may return a hint that does not completely satisfy the container request
-	// we should extend such a hint to one that satisfies the request and includes the current hint
-	if !isAffinitySatisfyRequest(machineState, bestHint.NUMANodeAffinity, requestedResources) {
-		extendedHint, err := p.extendTopologyManagerHint(machineState, pod, requestedResources, bestHint.NUMANodeAffinity)
-		if err != nil {
-			return err
-		}
-
-		if !extendedHint.Preferred && bestHint.Preferred {
-			return fmt.Errorf("[memorymanager] failed to find the extended preferred hint")
-		}
-		bestHint = extendedHint
-	}
-
-	// the best hint might violate the NUMA allocation rule on which
-	// NUMA node cannot have both single and cross NUMA node allocations
-	// https://kubernetes.io/blog/2021/08/11/kubernetes-1-22-feature-memory-manager-moves-to-beta/#single-vs-cross-numa-node-allocation
-	if isAffinityViolatingNUMAAllocations(machineState, bestHint.NUMANodeAffinity) {
-		return fmt.Errorf("[memorymanager] preferred hint violates NUMA node allocation")
-	}
-
-	var containerBlocks []state.Block
-	maskBits := bestHint.NUMANodeAffinity.GetBits()
-	for resourceName, requestedSize := range requestedResources {
-		// update memory blocks
-		containerBlocks = append(containerBlocks, state.Block{
-			NUMAAffinity: maskBits,
-			Size: requestedSize,
-			Type: resourceName,
-		})
-
-		podReusableMemory := p.getPodReusableMemory(pod, bestHint.NUMANodeAffinity, resourceName)
-		if podReusableMemory >= requestedSize {
-			requestedSize = 0
-		} else {
-			requestedSize -= podReusableMemory
-		}
-
-		// Update nodes memory state
-		p.updateMachineState(machineState, maskBits, resourceName, requestedSize)
-	}
-
-	p.updatePodReusableMemory(pod, container, containerBlocks)
-
-	s.SetMachineState(machineState)
-	s.SetMemoryBlocks(podUID, container.Name, containerBlocks)
-
-	// update init containers memory blocks to reflect the fact that we re-used init containers memory
-	// it is possible that the size of the init container memory block will have 0 value, when all memory
-	// allocated for it was re-used
-	// we only do this so that the sum(memory_for_all_containers) == total amount of allocated memory to the pod, even
-	// though the final state here doesn't accurately reflect what was (in reality) allocated to each container
-	// TODO: we should refactor our state structs to reflect the amount of the re-used memory
-	p.updateInitContainersMemoryBlocks(s, pod, container, containerBlocks)
-
-	klog.V(4).InfoS("Allocated exclusive memory", "pod", klog.KObj(pod), "containerName", container.Name)
-	return nil
-}
-
-func (p *staticPolicy) updateMachineState(machineState state.NUMANodeMap, numaAffinity []int, resourceName v1.ResourceName, requestedSize uint64) {
-	for _, nodeID := range numaAffinity {
-		machineState[nodeID].NumberOfAssignments++
-		machineState[nodeID].Cells = numaAffinity
-
-		// we need to continue to update all affinity mask nodes
-		if requestedSize == 0 {
-			continue
-		}
-
-		// update the node memory state
-		nodeResourceMemoryState := machineState[nodeID].MemoryMap[resourceName]
-		if nodeResourceMemoryState.Free <= 0 {
-			continue
-		}
-
-		// the node has enough memory to satisfy the request
-		if nodeResourceMemoryState.Free >= requestedSize {
-			nodeResourceMemoryState.Reserved += requestedSize
-			nodeResourceMemoryState.Free -= requestedSize
-			requestedSize = 0
-			continue
-		}
-
-		// the node does not have enough memory, use the node remaining memory and move to the next node
-		requestedSize -= nodeResourceMemoryState.Free
-		nodeResourceMemoryState.Reserved += nodeResourceMemoryState.Free
-		nodeResourceMemoryState.Free = 0
-	}
-}
-
-func (p *staticPolicy) getPodReusableMemory(pod *v1.Pod, numaAffinity bitmask.BitMask, resourceName v1.ResourceName) uint64 {
-	podReusableMemory, ok := p.initContainersReusableMemory[string(pod.UID)]
-	if !ok {
-		return 0
-	}
-
-	numaReusableMemory, ok := podReusableMemory[numaAffinity.String()]
-	if !ok {
-		return 0
-	}
-
-	return numaReusableMemory[resourceName]
-}
-
-// RemoveContainer call is idempotent
-func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) {
-	blocks := s.GetMemoryBlocks(podUID, containerName)
-	if blocks == nil {
-		return
-	}
-
-	klog.InfoS("RemoveContainer", "podUID", podUID, "containerName", containerName)
-	s.Delete(podUID, containerName)
-
-	// Mutate machine memory state to update free and reserved memory
-	machineState := s.GetMachineState()
-	for _, b := range blocks {
-		releasedSize := b.Size
-		for _, nodeID := range b.NUMAAffinity {
-			machineState[nodeID].NumberOfAssignments--
-
-			// once we do not have any memory allocations on this node, clear node groups
-			if machineState[nodeID].NumberOfAssignments == 0 {
-				machineState[nodeID].Cells = []int{nodeID}
-			}
-
-			// we still need to pass over all NUMA nodes under the affinity mask to update them
-			if releasedSize == 0 {
-				continue
-			}
-
-			nodeResourceMemoryState := machineState[nodeID].MemoryMap[b.Type]
-
-			// if the node does not have reserved memory to free, continue to the next node
-			if nodeResourceMemoryState.Reserved == 0 {
-				continue
-			}
-
-			// the reserved memory is smaller than the amount of memory that should be released
-			// release as much as possible and move to the next node
-			if nodeResourceMemoryState.Reserved < releasedSize {
-				releasedSize -= nodeResourceMemoryState.Reserved
-				nodeResourceMemoryState.Free += nodeResourceMemoryState.Reserved
-				nodeResourceMemoryState.Reserved = 0
-				continue
-			}
-
-			// the reserved memory is big enough to satisfy the released memory
-			nodeResourceMemoryState.Free += releasedSize
-			nodeResourceMemoryState.Reserved -= releasedSize
-			releasedSize = 0
-		}
-	}
-
-	s.SetMachineState(machineState)
-}
-
-func regenerateHints(pod *v1.Pod, ctn *v1.Container, ctnBlocks []state.Block, reqRsrc map[v1.ResourceName]uint64) map[string][]topologymanager.TopologyHint {
-	hints := map[string][]topologymanager.TopologyHint{}
-	for resourceName := range reqRsrc {
-		hints[string(resourceName)] = []topologymanager.TopologyHint{}
-	}
-
-	if len(ctnBlocks) != len(reqRsrc) {
-		klog.InfoS("The number of requested resources by the container differs from the number of memory blocks", "pod", klog.KObj(pod), "containerName", ctn.Name)
-		return nil
-	}
-
-	for _, b := range ctnBlocks {
-		if _, ok := reqRsrc[b.Type]; !ok {
-			klog.InfoS("Container requested resources but none available of this type", "pod", klog.KObj(pod), "containerName", ctn.Name, "type", b.Type)
-			return nil
-		}
-
-		if b.Size != reqRsrc[b.Type] {
-			klog.InfoS("Memory already allocated with different numbers than requested", "pod", klog.KObj(pod), "containerName", ctn.Name, "type", b.Type, "requestedResource", reqRsrc[b.Type], "allocatedSize", b.Size)
-			return nil
-		}
-
-		containerNUMAAffinity, err := bitmask.NewBitMask(b.NUMAAffinity...)
-		if err != nil {
-			klog.ErrorS(err, "Failed to generate NUMA bitmask", "pod", klog.KObj(pod), "containerName", ctn.Name, "type", b.Type)
-			return nil
-		}
-
-		klog.InfoS("Regenerating TopologyHints, resource was already allocated to pod", "resourceName", b.Type, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", ctn.Name)
-		hints[string(b.Type)] = append(hints[string(b.Type)], topologymanager.TopologyHint{
-			NUMANodeAffinity: containerNUMAAffinity,
-			Preferred: true,
-		})
-	}
-	return hints
-}
-
-func getPodRequestedResources(pod *v1.Pod) (map[v1.ResourceName]uint64, error) {
-	// Maximum resources requested by init containers at any given time.
-	reqRsrcsByInitCtrs := make(map[v1.ResourceName]uint64)
-	// Total resources requested by restartable init containers.
-	reqRsrcsByRestartableInitCtrs := make(map[v1.ResourceName]uint64)
-	for _, ctr := range pod.Spec.InitContainers {
-		reqRsrcs, err := getRequestedResources(pod, &ctr)
-
-		if err != nil {
-			return nil, err
-		}
-		for rsrcName, qty := range reqRsrcs {
-			if _, ok := reqRsrcsByInitCtrs[rsrcName]; !ok {
-				reqRsrcsByInitCtrs[rsrcName] = uint64(0)
-			}
-
-			// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
-			// for details.
-			if podutil.IsRestartableInitContainer(&ctr) {
-				reqRsrcsByRestartableInitCtrs[rsrcName] += qty
-			} else if reqRsrcsByRestartableInitCtrs[rsrcName]+qty > reqRsrcsByInitCtrs[rsrcName] {
-				reqRsrcsByInitCtrs[rsrcName] = reqRsrcsByRestartableInitCtrs[rsrcName] + qty
-			}
-		}
-	}
-
-	reqRsrcsByAppCtrs := make(map[v1.ResourceName]uint64)
-	for _, ctr := range pod.Spec.Containers {
-		reqRsrcs, err := getRequestedResources(pod, &ctr)
-
-		if err != nil {
-			return nil, err
-		}
-		for rsrcName, qty := range reqRsrcs {
-			if _, ok := reqRsrcsByAppCtrs[rsrcName]; !ok {
-				reqRsrcsByAppCtrs[rsrcName] = uint64(0)
-			}
-
-			reqRsrcsByAppCtrs[rsrcName] += qty
-		}
-	}
-
-	reqRsrcs := make(map[v1.ResourceName]uint64)
-	for rsrcName := range reqRsrcsByAppCtrs {
-		// Total resources requested by long-running containers.
-		reqRsrcsByLongRunningCtrs := reqRsrcsByAppCtrs[rsrcName] + reqRsrcsByRestartableInitCtrs[rsrcName]
-		reqRsrcs[rsrcName] = reqRsrcsByLongRunningCtrs
-
-		if reqRsrcs[rsrcName] < reqRsrcsByInitCtrs[rsrcName] {
-			reqRsrcs[rsrcName] = reqRsrcsByInitCtrs[rsrcName]
-		}
-	}
-	return reqRsrcs, nil
-}
-
-func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
-		return nil
-	}
-
-	reqRsrcs, err := getPodRequestedResources(pod)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get pod requested resources", "pod", klog.KObj(pod), "podUID", pod.UID)
-		return nil
-	}
-
-	for _, ctn := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
-		containerBlocks := s.GetMemoryBlocks(string(pod.UID), ctn.Name)
-		// Short circuit to regenerate the same hints if there are already
-		// memory allocated for the container. This might happen after a
-		// kubelet restart, for example.
-		if containerBlocks != nil {
-			return regenerateHints(pod, &ctn, containerBlocks, reqRsrcs)
-		}
-	}
-
-	// the pod topology hints calculated only once for all containers, so no need to pass re-usable state
-	return p.calculateHints(s.GetMachineState(), pod, reqRsrcs)
-}
-
-// GetTopologyHints implements the topologymanager.HintProvider Interface
-// and is consulted to achieve NUMA aware resource alignment among this
-// and other resource controllers.
-func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
-		return nil
-	}
-
-	requestedResources, err := getRequestedResources(pod, container)
-	if err != nil {
-		klog.ErrorS(err, "Failed to get container requested resources", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
-		return nil
-	}
-
-	containerBlocks := s.GetMemoryBlocks(string(pod.UID), container.Name)
-	// Short circuit to regenerate the same hints if there are already
-	// memory allocated for the container. This might happen after a
-	// kubelet restart, for example.
-	if containerBlocks != nil {
-		return regenerateHints(pod, container, containerBlocks, requestedResources)
-	}
-
-	return p.calculateHints(s.GetMachineState(), pod, requestedResources)
-}
-
-func getRequestedResources(pod *v1.Pod, container *v1.Container) (map[v1.ResourceName]uint64, error) {
-	requestedResources := map[v1.ResourceName]uint64{}
-	resources := container.Resources.Requests
-	// In-place pod resize feature makes Container.Resources field mutable for CPU & memory.
-	// AllocatedResources holds the value of Container.Resources.Requests when the pod was admitted.
-	// We should return this value because this is what kubelet agreed to allocate for the container
-	// and the value configured with runtime.
-	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-		containerStatuses := pod.Status.ContainerStatuses
-		if podutil.IsRestartableInitContainer(container) {
-			if len(pod.Status.InitContainerStatuses) != 0 {
-				containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...)
-			}
-		}
-		if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
-			resources = cs.AllocatedResources
-		}
-	}
-	for resourceName, quantity := range resources {
-		if resourceName != v1.ResourceMemory && !corehelper.IsHugePageResourceName(resourceName) {
-			continue
-		}
-		requestedSize, succeed := quantity.AsInt64()
-		if !succeed {
-			return nil, fmt.Errorf("[memorymanager] failed to represent quantity as int64")
-		}
-		requestedResources[resourceName] = uint64(requestedSize)
-	}
-	return requestedResources, nil
-}
-
-func (p *staticPolicy) calculateHints(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64) map[string][]topologymanager.TopologyHint {
-	var numaNodes []int
-	for n := range machineState {
-		numaNodes = append(numaNodes, n)
-	}
-	sort.Ints(numaNodes)
-
-	// Initialize minAffinitySize to include all NUMA Cells.
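	// Editorial sketch, not from the upstream file: bitmask.IterateBitMasks below
	// visits every non-empty subset of numaNodes. A hypothetical trace for
	// numaNodes = [0, 1]:
	//   mask {0}   -> kept as a hint if node 0 alone can hold the request
	//   mask {1}   -> kept as a hint if node 1 alone can hold the request
	//   mask {0,1} -> kept only if the cross-NUMA grouping rules allow it
	// minAffinitySize ends up as the size of the smallest mask that fits the request,
	// and only hints of that size are later marked Preferred.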
-	minAffinitySize := len(numaNodes)
-
-	hints := map[string][]topologymanager.TopologyHint{}
-	bitmask.IterateBitMasks(numaNodes, func(mask bitmask.BitMask) {
-		maskBits := mask.GetBits()
-		singleNUMAHint := len(maskBits) == 1
-
-		totalFreeSize := map[v1.ResourceName]uint64{}
-		totalAllocatableSize := map[v1.ResourceName]uint64{}
-		// calculate total free and allocatable memory for the node mask
-		for _, nodeID := range maskBits {
-			for resourceName := range requestedResources {
-				if _, ok := totalFreeSize[resourceName]; !ok {
-					totalFreeSize[resourceName] = 0
-				}
-				totalFreeSize[resourceName] += machineState[nodeID].MemoryMap[resourceName].Free
-
-				if _, ok := totalAllocatableSize[resourceName]; !ok {
-					totalAllocatableSize[resourceName] = 0
-				}
-				totalAllocatableSize[resourceName] += machineState[nodeID].MemoryMap[resourceName].Allocatable
-			}
-		}
-
-		// verify that for all memory types the node mask has enough allocatable resources
-		for resourceName, requestedSize := range requestedResources {
-			if totalAllocatableSize[resourceName] < requestedSize {
-				return
-			}
-		}
-
-		// set the minimum amount of NUMA nodes that can satisfy the container resources requests
-		if mask.Count() < minAffinitySize {
-			minAffinitySize = mask.Count()
-		}
-
-		// the node is already in a group with another node; it cannot be used for a single NUMA node allocation
-		if singleNUMAHint && len(machineState[maskBits[0]].Cells) > 1 {
-			return
-		}
-
-		for _, nodeID := range maskBits {
-			// the node was already used for memory allocation
-			if !singleNUMAHint && machineState[nodeID].NumberOfAssignments > 0 {
-				// the node is used for a single NUMA memory allocation; it cannot be used for a multi NUMA node allocation
-				if len(machineState[nodeID].Cells) == 1 {
-					return
-				}
-
-				// the node is already used with a different group of nodes; it cannot be used within the current hint
-				if !areGroupsEqual(machineState[nodeID].Cells, maskBits) {
-					return
-				}
-			}
-		}
-
-		// verify that for all memory types the node mask has enough free resources
-		for resourceName, requestedSize := range requestedResources {
-			podReusableMemory := p.getPodReusableMemory(pod, mask, resourceName)
-			if totalFreeSize[resourceName]+podReusableMemory < requestedSize {
-				return
-			}
-		}
-
-		// add the node mask as topology hint for all memory types
-		for resourceName := range requestedResources {
-			if _, ok := hints[string(resourceName)]; !ok {
-				hints[string(resourceName)] = []topologymanager.TopologyHint{}
-			}
-			hints[string(resourceName)] = append(hints[string(resourceName)], topologymanager.TopologyHint{
-				NUMANodeAffinity: mask,
-				Preferred: false,
-			})
-		}
-	})
-
-	// update the hints' preferred field according to multiNUMAGroups; when it wasn't provided, the default
-	// behaviour of preferring the minimal amount of NUMA nodes is used
-	for resourceName := range requestedResources {
-		for i, hint := range hints[string(resourceName)] {
-			hints[string(resourceName)][i].Preferred = p.isHintPreferred(hint.NUMANodeAffinity.GetBits(), minAffinitySize)
-		}
-	}
-
-	return hints
-}
-
-func (p *staticPolicy) isHintPreferred(maskBits []int, minAffinitySize int) bool {
-	return len(maskBits) == minAffinitySize
-}
-
-func areGroupsEqual(group1, group2 []int) bool {
-	sort.Ints(group1)
-	sort.Ints(group2)
-
-	if len(group1) != len(group2) {
-		return false
-	}
-
-	for i, elm := range group1 {
-		if group2[i] != elm {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *staticPolicy) validateState(s state.State) error {
-	machineState := s.GetMachineState()
-	memoryAssignments := s.GetMemoryAssignments()
-
-	if len(machineState) == 0 {
-		// Machine state cannot be empty when assignments exist
-		if len(memoryAssignments) != 0 {
-			return fmt.Errorf("[memorymanager] machine state can not be empty when it has memory assignments")
-		}
-
-		defaultMachineState := p.getDefaultMachineState()
-		s.SetMachineState(defaultMachineState)
-
-		return nil
-	}
-
-	// calculate all memory assigned to containers
-	expectedMachineState := p.getDefaultMachineState()
-	for pod, container := range memoryAssignments {
-		for containerName, blocks := range container {
-			for _, b := range blocks {
-				requestedSize := b.Size
-				for _, nodeID := range b.NUMAAffinity {
-					nodeState, ok := expectedMachineState[nodeID]
-					if !ok {
-						return fmt.Errorf("[memorymanager] (pod: %s, container: %s) the memory assignment uses the NUMA that does not exist", pod, containerName)
-					}
-
-					nodeState.NumberOfAssignments++
-					nodeState.Cells = b.NUMAAffinity
-
-					memoryState, ok := nodeState.MemoryMap[b.Type]
-					if !ok {
-						return fmt.Errorf("[memorymanager] (pod: %s, container: %s) the memory assignment uses memory resource that does not exist", pod, containerName)
-					}
-
-					if requestedSize == 0 {
-						continue
-					}
-
-					// this node does not have enough memory, continue to the next one
-					if memoryState.Free <= 0 {
-						continue
-					}
-
-					// the node has enough memory to satisfy the request
-					if memoryState.Free >= requestedSize {
-						memoryState.Reserved += requestedSize
-						memoryState.Free -= requestedSize
-						requestedSize = 0
-						continue
-					}
-
-					// the node does not have enough memory, use the node remaining memory and move to the next node
-					requestedSize -= memoryState.Free
-					memoryState.Reserved += memoryState.Free
-					memoryState.Free = 0
-				}
-			}
-		}
-	}
-
-	// State has already been initialized from file (is not empty)
-	// Validate that total size, system reserved and reserved memory have not changed; this can happen when:
-	// - adding or removing physical memory bank from the node
-	// - change of kubelet system-reserved, kube-reserved or pre-reserved-memory-zone parameters
-	if !areMachineStatesEqual(machineState, expectedMachineState) {
-		return fmt.Errorf("[memorymanager] the expected machine state is different from the real one")
-	}
-
-	return nil
-}
-
-func areMachineStatesEqual(ms1, ms2 state.NUMANodeMap) bool {
-	if len(ms1) != len(ms2) {
-		klog.InfoS("Node states were different", "lengthNode1", len(ms1), "lengthNode2", len(ms2))
-		return false
-	}
-
-	for nodeID, nodeState1 := range ms1 {
-		nodeState2, ok := ms2[nodeID]
-		if !ok {
-			klog.InfoS("Node state didn't have node ID", "nodeID", nodeID)
-			return false
-		}
-
-		if nodeState1.NumberOfAssignments != nodeState2.NumberOfAssignments {
-			klog.InfoS("Node state had a different number of memory assignments.", "assignment1", nodeState1.NumberOfAssignments, "assignment2", nodeState2.NumberOfAssignments)
-			return false
-		}
-
-		if !areGroupsEqual(nodeState1.Cells, nodeState2.Cells) {
-			klog.InfoS("Node states had different groups", "stateNode1", nodeState1.Cells, "stateNode2", nodeState2.Cells)
-			return false
-		}
-
-		if len(nodeState1.MemoryMap) != len(nodeState2.MemoryMap) {
-			klog.InfoS("Node state had memory maps of different lengths", "lengthNode1", len(nodeState1.MemoryMap), "lengthNode2", len(nodeState2.MemoryMap))
-			return false
-		}
-
-		for resourceName, memoryState1 := range nodeState1.MemoryMap {
-			memoryState2, ok := nodeState2.MemoryMap[resourceName]
-			if !ok {
-				klog.InfoS("Memory state didn't have resource", "resource", resourceName)
-				return false
-			}
-
-			if !areMemoryStatesEqual(memoryState1, memoryState2, nodeID, resourceName) {
-				return false
-			}
-
-			tmpState1 := state.MemoryTable{}
-			tmpState2 := state.MemoryTable{}
-			for _, nodeID := range nodeState1.Cells {
-				tmpState1.Free += ms1[nodeID].MemoryMap[resourceName].Free
-				tmpState1.Reserved += ms1[nodeID].MemoryMap[resourceName].Reserved
-				tmpState2.Free += ms2[nodeID].MemoryMap[resourceName].Free
-				tmpState2.Reserved += ms2[nodeID].MemoryMap[resourceName].Reserved
-			}
-
-			if tmpState1.Free != tmpState2.Free {
-				klog.InfoS("NUMA node and resource had different memory states", "node", nodeID, "resource", resourceName, "field", "free", "free1", tmpState1.Free, "free2", tmpState2.Free, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
-				return false
-			}
-			if tmpState1.Reserved != tmpState2.Reserved {
-				klog.InfoS("NUMA node and resource had different memory states", "node", nodeID, "resource", resourceName, "field", "reserved", "reserved1", tmpState1.Reserved, "reserved2", tmpState2.Reserved, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
-				return false
-			}
-		}
-	}
-	return true
-}
-
-func areMemoryStatesEqual(memoryState1, memoryState2 *state.MemoryTable, nodeID int, resourceName v1.ResourceName) bool {
-	if memoryState1.TotalMemSize != memoryState2.TotalMemSize {
-		klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "TotalMemSize", "TotalMemSize1", memoryState1.TotalMemSize, "TotalMemSize2", memoryState2.TotalMemSize, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
-		return false
-	}
-
-	if memoryState1.SystemReserved != memoryState2.SystemReserved {
-		klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "SystemReserved", "SystemReserved1", memoryState1.SystemReserved, "SystemReserved2", memoryState2.SystemReserved, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
-		return false
-	}
-
-	if memoryState1.Allocatable != memoryState2.Allocatable {
-		klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "Allocatable", "Allocatable1", memoryState1.Allocatable, "Allocatable2", memoryState2.Allocatable, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
-		return false
-	}
-	return true
-}
-
-func (p *staticPolicy) getDefaultMachineState() state.NUMANodeMap {
-	defaultMachineState := state.NUMANodeMap{}
-	nodeHugepages := map[int]uint64{}
-	for _, node := range p.machineInfo.Topology {
-		defaultMachineState[node.Id] = &state.NUMANodeState{
-			NumberOfAssignments: 0,
-			MemoryMap: map[v1.ResourceName]*state.MemoryTable{},
-			Cells: []int{node.Id},
-		}
-
-		// fill memory table with huge pages values
-		for _, hugepage := range node.HugePages {
-			hugepageQuantity := resource.NewQuantity(int64(hugepage.PageSize)*1024, resource.BinarySI)
-			resourceName := corehelper.HugePageResourceName(*hugepageQuantity)
-			systemReserved := p.getResourceSystemReserved(node.Id, resourceName)
-			totalHugepagesSize := hugepage.NumPages * hugepage.PageSize * 1024
-			allocatable := totalHugepagesSize - systemReserved
-			defaultMachineState[node.Id].MemoryMap[resourceName] = &state.MemoryTable{
-				Allocatable: allocatable,
-				Free: allocatable,
-				Reserved: 0,
-				SystemReserved: systemReserved,
-				TotalMemSize: totalHugepagesSize,
-			}
-			if _, ok := nodeHugepages[node.Id]; !ok {
-				nodeHugepages[node.Id] = 0
-			}
-			nodeHugepages[node.Id] += totalHugepagesSize
-		}
-
-		// fill memory table with regular memory values
-		systemReserved := p.getResourceSystemReserved(node.Id, v1.ResourceMemory)
-
-		allocatable := node.Memory - systemReserved
-		// remove memory allocated by hugepages
-		if allocatedByHugepages, ok := nodeHugepages[node.Id]; ok {
-			allocatable -= allocatedByHugepages
-		}
-		defaultMachineState[node.Id].MemoryMap[v1.ResourceMemory] = &state.MemoryTable{
-			Allocatable: allocatable,
-			Free: allocatable,
-			Reserved: 0,
-			SystemReserved: systemReserved,
-			TotalMemSize: node.Memory,
-		}
-	}
-	return defaultMachineState
-}
-
-func (p *staticPolicy) getResourceSystemReserved(nodeID int, resourceName v1.ResourceName) uint64 {
-	var systemReserved uint64
-	if nodeSystemReserved, ok := p.systemReserved[nodeID]; ok {
-		if nodeMemorySystemReserved, ok := nodeSystemReserved[resourceName]; ok {
-			systemReserved = nodeMemorySystemReserved
-		}
-	}
-	return systemReserved
-}
-
-func (p *staticPolicy) getDefaultHint(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64) (*topologymanager.TopologyHint, error) {
-	hints := p.calculateHints(machineState, pod, requestedResources)
-	if len(hints) < 1 {
-		return nil, fmt.Errorf("[memorymanager] failed to get the default NUMA affinity, no NUMA nodes with enough memory is available")
-	}
-
-	// hints for all memory types should be the same, so we will check hints only for regular memory type
-	return findBestHint(hints[string(v1.ResourceMemory)]), nil
-}
-
-func isAffinitySatisfyRequest(machineState state.NUMANodeMap, mask bitmask.BitMask, requestedResources map[v1.ResourceName]uint64) bool {
-	totalFreeSize := map[v1.ResourceName]uint64{}
-	for _, nodeID := range mask.GetBits() {
-		for resourceName := range requestedResources {
-			if _, ok := totalFreeSize[resourceName]; !ok {
-				totalFreeSize[resourceName] = 0
-			}
-			totalFreeSize[resourceName] += machineState[nodeID].MemoryMap[resourceName].Free
-		}
-	}
-
-	// verify that for all memory types the node mask has enough resources
-	for resourceName, requestedSize := range requestedResources {
-		if totalFreeSize[resourceName] < requestedSize {
-			return false
-		}
-	}
-
-	return true
-}
-
-// extendTopologyManagerHint extends the topology manager hint when it does not satisfy the container request
-// the topology manager uses bitwise AND to merge all topology hints into the best one, so in case of the restricted policy,
-// it is possible that we will get a subset of the hint that we provided to the topology manager; in this case we want to extend
-// it to the original one
-func (p *staticPolicy) extendTopologyManagerHint(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64, mask bitmask.BitMask) (*topologymanager.TopologyHint, error) {
-	hints := p.calculateHints(machineState, pod, requestedResources)
-
-	var filteredHints []topologymanager.TopologyHint
-	// hints for all memory types should be the same, so we will check hints only for regular memory type
-	for _, hint := range hints[string(v1.ResourceMemory)] {
-		affinityBits := hint.NUMANodeAffinity.GetBits()
-		// filter out all hints that do not include currentHint
-		if isHintInGroup(mask.GetBits(), affinityBits) {
-			filteredHints = append(filteredHints, hint)
-		}
-	}
-
-	if len(filteredHints) < 1 {
-		return nil, fmt.Errorf("[memorymanager] failed to find NUMA nodes to extend the current topology hint")
-	}
-
-	// try to find the preferred hint with the minimal number of NUMA nodes, relevant for the restricted policy
-	return findBestHint(filteredHints), nil
-}
-
-func isHintInGroup(hint []int, group []int) bool {
-	sort.Ints(hint)
-	sort.Ints(group)
-
-	hintIndex := 0
-	for i := range group {
-		if hintIndex == len(hint) {
-			return true
-		}
-
-		if group[i] != hint[hintIndex] {
-			continue
-		}
-		hintIndex++
-	}
-
-	return hintIndex == len(hint)
-}
-
-func findBestHint(hints []topologymanager.TopologyHint) *topologymanager.TopologyHint {
-	// try to find the preferred hint with the minimal number of NUMA nodes, relevant for the restricted policy
-	bestHint := topologymanager.TopologyHint{}
-	for _, hint := range hints {
-		if bestHint.NUMANodeAffinity == nil {
-			bestHint = hint
-			continue
-		}
-
-		// the current hint is preferred while the best hint so far is not
-		if hint.Preferred && !bestHint.Preferred {
-			bestHint = hint
-			continue
-		}
-
-		// both hints have the same preferred value, but the current hint has fewer NUMA nodes than the best one
-		if hint.Preferred == bestHint.Preferred && hint.NUMANodeAffinity.IsNarrowerThan(bestHint.NUMANodeAffinity) {
-			bestHint = hint
-		}
-	}
-	return &bestHint
-}
-
-// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
-func (p *staticPolicy) GetAllocatableMemory(s state.State) []state.Block {
-	var allocatableMemory []state.Block
-	machineState := s.GetMachineState()
-	for numaNodeID, numaNodeState := range machineState {
-		for resourceName, memoryTable := range numaNodeState.MemoryMap {
-			if memoryTable.Allocatable == 0 {
-				continue
-			}
-
-			block := state.Block{
-				NUMAAffinity: []int{numaNodeID},
-				Type: resourceName,
-				Size: memoryTable.Allocatable,
-			}
-			allocatableMemory = append(allocatableMemory, block)
-		}
-	}
-	return allocatableMemory
-}
-
-func (p *staticPolicy) updatePodReusableMemory(pod *v1.Pod, container *v1.Container, memoryBlocks []state.Block) {
-	podUID := string(pod.UID)
-
-	// If entries for pods other than the current one exist in p.initContainersReusableMemory, delete them.
-	for uid := range p.initContainersReusableMemory {
-		if podUID != uid {
-			delete(p.initContainersReusableMemory, uid)
-		}
-	}
-
-	if isRegularInitContainer(pod, container) {
-		if _, ok := p.initContainersReusableMemory[podUID]; !ok {
-			p.initContainersReusableMemory[podUID] = map[string]map[v1.ResourceName]uint64{}
-		}
-
-		for _, block := range memoryBlocks {
-			blockBitMask, _ := bitmask.NewBitMask(block.NUMAAffinity...)
-			blockBitMaskString := blockBitMask.String()
-
-			if _, ok := p.initContainersReusableMemory[podUID][blockBitMaskString]; !ok {
-				p.initContainersReusableMemory[podUID][blockBitMaskString] = map[v1.ResourceName]uint64{}
-			}
-
-			if blockReusableMemory := p.initContainersReusableMemory[podUID][blockBitMaskString][block.Type]; block.Size > blockReusableMemory {
-				p.initContainersReusableMemory[podUID][blockBitMaskString][block.Type] = block.Size
-			}
-		}
-
-		return
-	}
-
-	// update re-usable memory once it is used by the app container
-	for _, block := range memoryBlocks {
-		blockBitMask, _ := bitmask.NewBitMask(block.NUMAAffinity...)
-		if podReusableMemory := p.getPodReusableMemory(pod, blockBitMask, block.Type); podReusableMemory != 0 {
-			if block.Size >= podReusableMemory {
-				p.initContainersReusableMemory[podUID][blockBitMask.String()][block.Type] = 0
-			} else {
-				p.initContainersReusableMemory[podUID][blockBitMask.String()][block.Type] -= block.Size
-			}
-		}
-	}
-}
-
-func (p *staticPolicy) updateInitContainersMemoryBlocks(s state.State, pod *v1.Pod, container *v1.Container, containerMemoryBlocks []state.Block) {
-	podUID := string(pod.UID)
-
-	for _, containerBlock := range containerMemoryBlocks {
-		blockSize := containerBlock.Size
-		for _, initContainer := range pod.Spec.InitContainers {
-			// we do not want to continue updates once we reach the current container
-			if initContainer.Name == container.Name {
-				break
-			}
-
-			if blockSize == 0 {
-				break
-			}
-
-			if podutil.IsRestartableInitContainer(&initContainer) {
-				// we should not reuse the resource from any restartable init
-				// container
-				continue
-			}
-
-			initContainerBlocks := s.GetMemoryBlocks(podUID, initContainer.Name)
-			if len(initContainerBlocks) == 0 {
-				continue
-			}
-
-			for i := range initContainerBlocks {
-				initContainerBlock := &initContainerBlocks[i]
-				if initContainerBlock.Size == 0 {
-					continue
-				}
-
-				if initContainerBlock.Type != containerBlock.Type {
-					continue
-				}
-
-				if !isNUMAAffinitiesEqual(initContainerBlock.NUMAAffinity, containerBlock.NUMAAffinity) {
-					continue
-				}
-
-				if initContainerBlock.Size > blockSize {
-					initContainerBlock.Size -= blockSize
-					blockSize = 0
-				} else {
-					blockSize -= initContainerBlock.Size
-					initContainerBlock.Size = 0
-				}
-			}
-
-			s.SetMemoryBlocks(podUID, initContainer.Name, initContainerBlocks)
-		}
-	}
-}
-
-func isRegularInitContainer(pod *v1.Pod, container *v1.Container) bool {
-	for _, initContainer := range pod.Spec.InitContainers {
-		if initContainer.Name == container.Name {
-			return !podutil.IsRestartableInitContainer(&initContainer)
-		}
-	}
-
-	return false
-}
-
-func isNUMAAffinitiesEqual(numaAffinity1, numaAffinity2 []int) bool {
-	bitMask1, err := bitmask.NewBitMask(numaAffinity1...)
-	if err != nil {
-		klog.ErrorS(err, "failed to create bit mask", "numaAffinity1", numaAffinity1)
-		return false
-	}
-
-	bitMask2, err := bitmask.NewBitMask(numaAffinity2...)
-	if err != nil {
-		klog.ErrorS(err, "failed to create bit mask", "numaAffinity2", numaAffinity2)
-		return false
-	}
-
-	return bitMask1.IsEqual(bitMask2)
-}
-
-func isAffinityViolatingNUMAAllocations(machineState state.NUMANodeMap, mask bitmask.BitMask) bool {
-	maskBits := mask.GetBits()
-	singleNUMAHint := len(maskBits) == 1
-	for _, nodeID := range mask.GetBits() {
-		// the node was never used for the memory allocation
-		if machineState[nodeID].NumberOfAssignments == 0 {
-			continue
-		}
-		if singleNUMAHint {
-			continue
-		}
-		// the node is used for a single NUMA memory allocation; it cannot be used for a multi NUMA node allocation
-		if len(machineState[nodeID].Cells) == 1 {
-			return true
-		}
-		// the node is already used with a different group of nodes; it cannot be used within the current hint
-		if !areGroupsEqual(machineState[nodeID].Cells, maskBits) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/checkpoint.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/checkpoint.go
deleted file mode 100644
index 93b1302baa1..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/checkpoint.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package state
-
-import (
-	"encoding/json"
-
-	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
-	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
-)
-
-var _ checkpointmanager.Checkpoint = &MemoryManagerCheckpoint{}
-
-// MemoryManagerCheckpoint struct is used to store memory/pod assignments in a checkpoint
-type MemoryManagerCheckpoint struct {
-	PolicyName string `json:"policyName"`
-	MachineState NUMANodeMap `json:"machineState"`
-	Entries ContainerMemoryAssignments `json:"entries,omitempty"`
-	Checksum checksum.Checksum `json:"checksum"`
-}
-
-// NewMemoryManagerCheckpoint returns an instance of Checkpoint
-func NewMemoryManagerCheckpoint() *MemoryManagerCheckpoint {
-	//nolint:staticcheck // unexported-type-in-api user-facing error message
-	return &MemoryManagerCheckpoint{
-		Entries: ContainerMemoryAssignments{},
-		MachineState: NUMANodeMap{},
-	}
-}
-
-// MarshalCheckpoint returns marshalled checkpoint
-func (mp *MemoryManagerCheckpoint) MarshalCheckpoint() ([]byte, error) {
-	// make sure checksum wasn't set before so it doesn't affect output checksum
-	mp.Checksum = 0
-	mp.Checksum = checksum.New(mp)
-	return json.Marshal(*mp)
-}
-
-// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint
-func (mp *MemoryManagerCheckpoint) UnmarshalCheckpoint(blob []byte) error {
-	return json.Unmarshal(blob, mp)
-}
-
-// VerifyChecksum verifies that current checksum of checkpoint is valid
-func (mp *MemoryManagerCheckpoint) VerifyChecksum() error {
-	ck := mp.Checksum
-	mp.Checksum = 0
-	err := ck.Verify(mp)
-	mp.Checksum = ck
-	return err
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state.go
deleted file mode 100644
index 322ca608e4c..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state.go
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package state
-
-import (
-	v1 "k8s.io/api/core/v1"
-)
-
-// MemoryTable contains memory information
-type MemoryTable struct {
-	TotalMemSize uint64 `json:"total"`
-	SystemReserved uint64 `json:"systemReserved"`
-	Allocatable uint64 `json:"allocatable"`
-	Reserved uint64 `json:"reserved"`
-	Free uint64 `json:"free"`
-}
-
-// NUMANodeState contains NUMA node related information
-type NUMANodeState struct {
-	// NumberOfAssignments contains the number of memory assignments from this node
-	// When a container requires both memory and hugepages, it increases the number of assignments by two
-	NumberOfAssignments int `json:"numberOfAssignments"`
-	// MemoryTable contains NUMA node memory related information
-	MemoryMap map[v1.ResourceName]*MemoryTable `json:"memoryMap"`
-	// Cells contains the current NUMA node and all other nodes that are in a group with the current NUMA node
-	// This parameter indicates whether the current node is used for a multi NUMA node memory allocation
-	// For example, if a container is pinned to NUMA nodes 0,1,2, each of those nodes will have
-	// this parameter equal to [0, 1, 2]
-	Cells []int `json:"cells"`
-}
-
-// NUMANodeMap contains memory information for each NUMA node.
-type NUMANodeMap map[int]*NUMANodeState
-
-// Clone returns a copy of NUMANodeMap
-func (nm NUMANodeMap) Clone() NUMANodeMap {
-	clone := make(NUMANodeMap)
-	for node, s := range nm {
-		if s == nil {
-			clone[node] = nil
-			continue
-		}
-
-		clone[node] = &NUMANodeState{}
-		clone[node].NumberOfAssignments = s.NumberOfAssignments
-		clone[node].Cells = append([]int{}, s.Cells...)
-
-		if s.MemoryMap == nil {
-			continue
-		}
-
-		clone[node].MemoryMap = map[v1.ResourceName]*MemoryTable{}
-		for memoryType, memoryTable := range s.MemoryMap {
-			clone[node].MemoryMap[memoryType] = &MemoryTable{
-				Allocatable: memoryTable.Allocatable,
-				Free: memoryTable.Free,
-				Reserved: memoryTable.Reserved,
-				SystemReserved: memoryTable.SystemReserved,
-				TotalMemSize: memoryTable.TotalMemSize,
-			}
-		}
-	}
-	return clone
-}
-
-// Block is a data structure used to represent a certain amount of memory
-type Block struct {
-	// NUMAAffinity contains the string that represents NUMA affinity bitmask
-	NUMAAffinity []int `json:"numaAffinity"`
-	Type v1.ResourceName `json:"type"`
-	Size uint64 `json:"size"`
-}
-
-// ContainerMemoryAssignments stores memory assignments of containers
-type ContainerMemoryAssignments map[string]map[string][]Block
-
-// Clone returns a copy of ContainerMemoryAssignments
-func (as ContainerMemoryAssignments) Clone() ContainerMemoryAssignments {
-	clone := make(ContainerMemoryAssignments)
-	for pod := range as {
-		clone[pod] = make(map[string][]Block)
-		for container, blocks := range as[pod] {
-			clone[pod][container] = append([]Block{}, blocks...)
-		}
-	}
-	return clone
-}
-
-// Reader interface used to read current memory/pod assignment state
-type Reader interface {
-	// GetMachineState returns Memory Map stored in the State
-	GetMachineState() NUMANodeMap
-	// GetMemoryBlocks returns memory assignments of a container
-	GetMemoryBlocks(podUID string, containerName string) []Block
-	// GetMemoryAssignments returns ContainerMemoryAssignments
-	GetMemoryAssignments() ContainerMemoryAssignments
-}
-
-type writer interface {
-	// SetMachineState stores NUMANodeMap in State
-	SetMachineState(memoryMap NUMANodeMap)
-	// SetMemoryBlocks stores memory assignments of a container
-	SetMemoryBlocks(podUID string, containerName string, blocks []Block)
-	// SetMemoryAssignments sets ContainerMemoryAssignments by using the passed parameter
-	SetMemoryAssignments(assignments ContainerMemoryAssignments)
-	// Delete deletes corresponding Blocks from ContainerMemoryAssignments
-	Delete(podUID string, containerName string)
-	// ClearState clears machineState and ContainerMemoryAssignments
-	ClearState()
-}
-
-// State interface provides methods for tracking and setting memory/pod assignment
-type State interface {
-	Reader
-	writer
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go
deleted file mode 100644
index dce0bda55a3..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package state
-
-import (
-	"fmt"
-	"path/filepath"
-	"sync"
-
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
-	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
-)
-
-var _ State = &stateCheckpoint{}
-
-type stateCheckpoint struct {
-	sync.RWMutex
-	cache State
-	policyName string
-	checkpointManager checkpointmanager.CheckpointManager
-	checkpointName string
-}
-
-// NewCheckpointState creates new State for keeping track of memory/pod assignment with checkpoint backend
-func NewCheckpointState(stateDir, checkpointName, policyName string) (State, error) {
-	checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
-	if err != nil {
-		return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
-	}
-	stateCheckpoint := &stateCheckpoint{
-		cache: NewMemoryState(),
-		policyName: policyName,
-		checkpointManager: checkpointManager,
-		checkpointName: checkpointName,
-	}
-
-	if err := stateCheckpoint.restoreState(); err != nil {
-		//nolint:staticcheck // ST1005 user-facing error message
-		return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete the memory manager checkpoint file %q before restarting Kubelet",
-			err, filepath.Join(stateDir, checkpointName))
-	}
-
-	return stateCheckpoint, nil
-}
-
-// restores state from a checkpoint and creates it if it doesn't exist
-func (sc *stateCheckpoint) restoreState() error {
-	sc.Lock()
-	defer sc.Unlock()
-	var err error
-
-	checkpoint := NewMemoryManagerCheckpoint()
-	if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint); err != nil {
-		if err == errors.ErrCheckpointNotFound {
-			return sc.storeState()
-		}
-		return err
-	}
-
-	if sc.policyName != checkpoint.PolicyName {
-		return fmt.Errorf("[memorymanager] configured policy %q differs from state checkpoint policy %q", sc.policyName, checkpoint.PolicyName)
-	}
-
-	sc.cache.SetMachineState(checkpoint.MachineState)
-	sc.cache.SetMemoryAssignments(checkpoint.Entries)
-
-	klog.V(2).InfoS("State checkpoint: restored state from checkpoint")
-
-	return nil
-}
-
-// saves state to a checkpoint, caller is responsible for locking
-func (sc *stateCheckpoint) storeState() error {
-	checkpoint := NewMemoryManagerCheckpoint()
-	checkpoint.PolicyName = sc.policyName
-	checkpoint.MachineState = sc.cache.GetMachineState()
-	checkpoint.Entries = sc.cache.GetMemoryAssignments()
-
-	err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
-	if err != nil {
-		klog.ErrorS(err, "Could not save checkpoint")
-		return err
-	}
-	return nil
-}
-
-// GetMachineState returns Memory Map stored in the State
-func (sc *stateCheckpoint) GetMachineState() NUMANodeMap {
-	sc.RLock()
-	defer sc.RUnlock()
-
-	return sc.cache.GetMachineState()
-}
-
-// GetMemoryBlocks returns memory assignments of a container
-func (sc *stateCheckpoint) GetMemoryBlocks(podUID string, containerName string) []Block {
-	sc.RLock()
-	defer sc.RUnlock()
-
-	return sc.cache.GetMemoryBlocks(podUID, containerName)
-}
-
-// GetMemoryAssignments returns ContainerMemoryAssignments
-func (sc *stateCheckpoint) GetMemoryAssignments() ContainerMemoryAssignments {
-	sc.RLock()
-	defer sc.RUnlock()
-
-	return sc.cache.GetMemoryAssignments()
-}
-
-// SetMachineState stores NUMANodeMap in State
-func (sc *stateCheckpoint) SetMachineState(memoryMap NUMANodeMap) {
-	sc.Lock()
-	defer sc.Unlock()
-
-	sc.cache.SetMachineState(memoryMap)
-	err := sc.storeState()
-	if err != nil {
-		klog.ErrorS(err, "Failed to store state to checkpoint")
-	}
-}
-
-// SetMemoryBlocks stores memory assignments of container
-func (sc *stateCheckpoint) SetMemoryBlocks(podUID string, containerName string, blocks []Block) {
-	sc.Lock()
-	defer sc.Unlock()
-
-	sc.cache.SetMemoryBlocks(podUID, containerName, blocks)
-	err := sc.storeState()
-	if err != nil {
-		klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
-	}
-}
-
-// SetMemoryAssignments sets ContainerMemoryAssignments by using the passed parameter
-func (sc *stateCheckpoint) SetMemoryAssignments(assignments ContainerMemoryAssignments) {
-	sc.Lock()
-	defer sc.Unlock()
-
-	sc.cache.SetMemoryAssignments(assignments)
-	err := sc.storeState()
-	if err != nil {
-		klog.ErrorS(err, "Failed to store state to checkpoint")
-	}
-}
-
-// Delete deletes corresponding Blocks from ContainerMemoryAssignments
-func (sc *stateCheckpoint) Delete(podUID string, containerName string) {
-	sc.Lock()
-	defer sc.Unlock()
-
-	sc.cache.Delete(podUID, containerName)
-	err := sc.storeState()
-	if err != nil {
-		klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
-	}
-}
-
-// ClearState clears machineState and ContainerMemoryAssignments
-func (sc *stateCheckpoint) ClearState() {
-	sc.Lock()
-	defer sc.Unlock()
-
-	sc.cache.ClearState()
-	err := sc.storeState()
-	if err != nil {
-		klog.ErrorS(err, "Failed to store state to checkpoint")
-	}
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_mem.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_mem.go
deleted file mode 100644
index 19861c6e35a..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_mem.go
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package state
-
-import (
-	"sync"
-
-	"k8s.io/klog/v2"
-)
-
-type stateMemory struct {
-	sync.RWMutex
-	assignments ContainerMemoryAssignments
-	machineState NUMANodeMap
-}
-
-var _ State = &stateMemory{}
-
-// NewMemoryState creates new State for keeping track of memory/pod assignment
-func NewMemoryState() State {
-	klog.InfoS("Initializing new in-memory state store")
-	return &stateMemory{
-		assignments: ContainerMemoryAssignments{},
-		machineState: NUMANodeMap{},
-	}
-}
-
-// GetMachineState returns Memory Map stored in the State
-func (s *stateMemory) GetMachineState() NUMANodeMap {
-	s.RLock()
-	defer s.RUnlock()
-
-	return s.machineState.Clone()
-}
-
-// GetMemoryBlocks returns memory assignments of a container
-func (s *stateMemory) GetMemoryBlocks(podUID string, containerName string) []Block {
-	s.RLock()
-	defer s.RUnlock()
-
-	if res, ok := s.assignments[podUID][containerName]; ok {
-		return append([]Block{}, res...)
-	}
-	return nil
-}
-
-// GetMemoryAssignments returns ContainerMemoryAssignments
-func (s *stateMemory) GetMemoryAssignments() ContainerMemoryAssignments {
-	s.RLock()
-	defer s.RUnlock()
-
-	return s.assignments.Clone()
-}
-
-// SetMachineState stores NUMANodeMap in State
-func (s *stateMemory) SetMachineState(nodeMap NUMANodeMap) {
-	s.Lock()
-	defer s.Unlock()
-
-	s.machineState = nodeMap.Clone()
-	klog.InfoS("Updated machine memory state")
-}
-
-// SetMemoryBlocks stores memory assignments of container
-func (s *stateMemory) SetMemoryBlocks(podUID string, containerName string, blocks []Block) {
-	s.Lock()
-	defer s.Unlock()
-
-	if _, ok := s.assignments[podUID]; !ok {
-		s.assignments[podUID] = map[string][]Block{}
-	}
-
-	s.assignments[podUID][containerName] = append([]Block{}, blocks...)
-	klog.InfoS("Updated memory state", "podUID", podUID, "containerName", containerName)
-}
-
-// SetMemoryAssignments sets ContainerMemoryAssignments by using the passed parameter
-func (s *stateMemory) SetMemoryAssignments(assignments ContainerMemoryAssignments) {
-	s.Lock()
-	defer s.Unlock()
-
-	s.assignments = assignments.Clone()
-	klog.V(5).InfoS("Updated Memory assignments", "assignments", assignments)
-}
-
-// Delete deletes corresponding Blocks from ContainerMemoryAssignments
-func (s *stateMemory) Delete(podUID string, containerName string) {
-	s.Lock()
-	defer s.Unlock()
-
-	if _, ok := s.assignments[podUID]; !ok {
-		return
-	}
-
-	delete(s.assignments[podUID], containerName)
-	if len(s.assignments[podUID]) == 0 {
-		delete(s.assignments, podUID)
-	}
-	klog.V(2).InfoS("Deleted memory assignment", "podUID", podUID, "containerName", containerName)
-}
-
-// ClearState clears machineState and ContainerMemoryAssignments
-func (s *stateMemory) ClearState() {
-	s.Lock()
-	defer s.Unlock()
-
-	s.machineState = NUMANodeMap{}
-	s.assignments = make(ContainerMemoryAssignments)
-	klog.V(2).InfoS("Cleared state")
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go
deleted file mode 100644
index 7fa180cefdd..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go
+++ /dev/null
@@ -1,328 +0,0 @@
-//go:build linux
-// +build linux
-
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cm
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/types"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/klog/v2"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
-	"k8s.io/kubernetes/pkg/kubelet/events"
-	"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-)
-
-const (
-	defaultNodeAllocatableCgroupName = "kubepods"
-)
-
-// createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
-func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
-	nodeAllocatable := cm.internalCapacity
-	// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
-	nc := cm.NodeConfig.NodeAllocatableConfig
-	if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) {
-		nodeAllocatable = cm.getNodeAllocatableInternalAbsolute()
-	}
-
-	cgroupConfig := &CgroupConfig{
-		Name: cm.cgroupRoot,
-		// The default limits for cpu shares can be very low which can lead to CPU starvation for pods.
-		ResourceParameters: cm.getCgroupConfig(nodeAllocatable, false),
-	}
-	if cm.cgroupManager.Exists(cgroupConfig.Name) {
-		return nil
-	}
-	if err := cm.cgroupManager.Create(cgroupConfig); err != nil {
-		klog.ErrorS(err, "Failed to create cgroup", "cgroupName", cm.cgroupRoot)
-		return err
-	}
-	return nil
-}
-
-// enforceNodeAllocatableCgroups enforce Node Allocatable Cgroup settings.
-func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
-	nc := cm.NodeConfig.NodeAllocatableConfig
-
-	// We need to update limits on node allocatable cgroup no matter what because
-	// default cpu shares on cgroups are low and can cause cpu starvation.
-	nodeAllocatable := cm.internalCapacity
-	// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
-	if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) {
-		nodeAllocatable = cm.getNodeAllocatableInternalAbsolute()
-	}
-
-	klog.V(4).InfoS("Attempting to enforce Node Allocatable", "config", nc)
-
-	cgroupConfig := &CgroupConfig{
-		Name: cm.cgroupRoot,
-		ResourceParameters: cm.getCgroupConfig(nodeAllocatable, false),
-	}
-
-	// Using ObjectReference for events as the node may not be cached; refer to #42701 for detail.
-	nodeRef := nodeRefFromNode(cm.nodeInfo.Name)
-
-	// If Node Allocatable is enforced on a node that has not been drained or is updated on an existing node to a lower value,
-	// existing memory usage across pods might be higher than current Node Allocatable Memory Limits.
-	// Pod Evictions are expected to bring down memory usage to below Node Allocatable limits.
-	// Until evictions happen, retry cgroup updates.
-	// Update limits on non root cgroup-root to be safe since the default limits for CPU can be too low.
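	// (Editorial note, not part of the deleted upstream file: the goroutine below
	// retries cm.cgroupManager.Update once a minute, emitting a warning event on
	// each failure and a single success event once the limits finally apply. For
	// example, on a 16Gi node whose allocatable was lowered to 14Gi while pods
	// still use 15Gi, the update keeps failing until eviction drops usage below 14Gi.)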
-	// Check if cgroupRoot is set to a non-empty value (empty would be the root container)
-	if len(cm.cgroupRoot) > 0 {
-		go func() {
-			for {
-				err := cm.cgroupManager.Update(cgroupConfig)
-				if err == nil {
-					cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
-					return
-				}
-				message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err)
-				cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-				time.Sleep(time.Minute)
-			}
-		}()
-	}
-	// Now apply kube reserved and system reserved limits if required.
-	if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) {
-		klog.V(2).InfoS("Enforcing system reserved on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved)
-		if err := cm.enforceExistingCgroup(nc.SystemReservedCgroupName, nc.SystemReserved, false); err != nil {
-			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
-			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return errors.New(message)
-		}
-		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
-	}
-	if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) {
-		klog.V(2).InfoS("Enforcing kube reserved on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved)
-		if err := cm.enforceExistingCgroup(nc.KubeReservedCgroupName, nc.KubeReserved, false); err != nil {
-			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
-			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return errors.New(message)
-		}
-		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
-	}
-
-	if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedCompressibleEnforcementKey) {
-		klog.V(2).InfoS("Enforcing system reserved compressible on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved)
-		if err := cm.enforceExistingCgroup(nc.SystemReservedCgroupName, nc.SystemReserved, true); err != nil {
-			message := fmt.Sprintf("Failed to enforce System Reserved Compressible Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
-			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return errors.New(message)
-		}
-		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
-	}
-
-	if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedCompressibleEnforcementKey) {
-		klog.V(2).InfoS("Enforcing kube reserved compressible on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved)
-		if err := cm.enforceExistingCgroup(nc.KubeReservedCgroupName, nc.KubeReserved, true); err != nil {
-			message := fmt.Sprintf("Failed to enforce Kube Reserved Compressible Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
-			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
-			return errors.New(message)
-		}
-		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
-	}
-	return nil
-}
-
-// enforceExistingCgroup updates the limits `rl` on existing cgroup `cName` using `cgroupManager` interface.
-func (cm *containerManagerImpl) enforceExistingCgroup(cNameStr string, rl v1.ResourceList, compressibleResources bool) error {
-	cName := cm.cgroupManager.CgroupName(cNameStr)
-	rp := cm.getCgroupConfig(rl, compressibleResources)
-	if rp == nil {
-		return fmt.Errorf("%q cgroup is not configured properly", cName)
-	}
-
-	// Enforce MemoryQoS for cgroups of kube-reserved/system-reserved. For more information,
-	// see https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) {
-		if rp.Memory != nil {
-			if rp.Unified == nil {
-				rp.Unified = make(map[string]string)
-			}
-			rp.Unified[Cgroup2MemoryMin] = strconv.FormatInt(*rp.Memory, 10)
-		}
-	}
-
-	cgroupConfig := &CgroupConfig{
-		Name: cName,
-		ResourceParameters: rp,
-	}
-	klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
-	if err := cm.cgroupManager.Validate(cgroupConfig.Name); err != nil {
-		return err
-	}
-	if err := cm.cgroupManager.Update(cgroupConfig); err != nil {
-		return err
-	}
-	return nil
-}
-
-// getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface.
-func (cm *containerManagerImpl) getCgroupConfig(rl v1.ResourceList, compressibleResourcesOnly bool) *ResourceConfig {
-	rc := getCgroupConfigInternal(rl, compressibleResourcesOnly)
-	if rc == nil {
-		return nil
-	}
-
-	// In the case of a None policy, cgroupv2 and systemd cgroup manager, we must make sure systemd is aware of the cpuset cgroup.
-	// By default, systemd will not create it, as we've not chosen to delegate it, and we haven't included it in the Apply() request.
-	// However, this causes a bug where kubelet restarts unnecessarily (cpuset cgroup is created in the cgroupfs, but systemd
-	// doesn't know about it and deletes it, and then kubelet doesn't continue because the cgroup isn't configured as expected).
-	// An alternative is to delegate the `cpuset` cgroup to the kubelet, but that would require some plumbing in libcontainer,
-	// and this is sufficient.
-	// Only do so on None policy, as Static policy will do its own updating of the cpuset.
-	// Please see the comment on policy none's GetAllocatableCPUs
-	if cm.cpuManager.GetAllocatableCPUs().IsEmpty() {
-		rc.CPUSet = cm.cpuManager.GetAllCPUs()
-	}
-
-	return rc
-}
-
-// getCgroupConfigInternal contains the pieces of getCgroupConfig that don't require the cm object.
-// This is separated out to allow unit testing without needing to create a full containerManager
-func getCgroupConfigInternal(rl v1.ResourceList, compressibleResourcesOnly bool) *ResourceConfig {
-	// TODO(vishh): Set CPU Quota if necessary.
-	if rl == nil {
-		return nil
-	}
-	var rc ResourceConfig
-
-	setCompressibleResources := func() {
-		if q, exists := rl[v1.ResourceCPU]; exists {
-			// CPU is defined in milli-cores.
-			val := MilliCPUToShares(q.MilliValue())
-			rc.CPUShares = &val
-		}
-	}
-
-	// Only return compressible resources
-	if compressibleResourcesOnly {
-		setCompressibleResources()
-	} else {
-		if q, exists := rl[v1.ResourceMemory]; exists {
-			// Memory is defined in bytes.
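			// (Editorial example, not in the upstream file: a quantity of 128Mi
			// resolves to 134217728 bytes here.)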
- val := q.Value() - rc.Memory = &val - } - - setCompressibleResources() - - if q, exists := rl[pidlimit.PIDs]; exists { - val := q.Value() - rc.PidsLimit = &val - } - rc.HugePageLimit = HugePageLimits(rl) - } - return &rc -} - -// GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement. -// Note that not all resources that are available on the node are included in the returned list of resources. -// Returns a ResourceList. -func (cm *containerManagerImpl) GetNodeAllocatableAbsolute() v1.ResourceList { - return cm.getNodeAllocatableAbsoluteImpl(cm.capacity) -} - -func (cm *containerManagerImpl) getNodeAllocatableAbsoluteImpl(capacity v1.ResourceList) v1.ResourceList { - result := make(v1.ResourceList) - for k, v := range capacity { - value := v.DeepCopy() - if cm.NodeConfig.SystemReserved != nil { - value.Sub(cm.NodeConfig.SystemReserved[k]) - } - if cm.NodeConfig.KubeReserved != nil { - value.Sub(cm.NodeConfig.KubeReserved[k]) - } - if value.Sign() < 0 { - // Negative Allocatable resources don't make sense. - value.Set(0) - } - result[k] = value - } - return result -} - -// getNodeAllocatableInternalAbsolute is similar to getNodeAllocatableAbsolute except that -// it also includes internal resources (currently process IDs). It is intended for setting -// up top level cgroups only. -func (cm *containerManagerImpl) getNodeAllocatableInternalAbsolute() v1.ResourceList { - return cm.getNodeAllocatableAbsoluteImpl(cm.internalCapacity) -} - -// GetNodeAllocatableReservation returns the amount of compute or storage resources that have to be reserved on this node from scheduling. -func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList { - evictionReservation := hardEvictionReservation(cm.HardEvictionThresholds, cm.capacity) - result := make(v1.ResourceList) - for k := range cm.capacity { - value := resource.NewQuantity(0, resource.DecimalSI) - if cm.NodeConfig.SystemReserved != nil { - value.Add(cm.NodeConfig.SystemReserved[k]) - } - if cm.NodeConfig.KubeReserved != nil { - value.Add(cm.NodeConfig.KubeReserved[k]) - } - if evictionReservation != nil { - value.Add(evictionReservation[k]) - } - if !value.IsZero() { - result[k] = *value - } - } - return result -} - -// validateNodeAllocatable ensures that the user specified Node Allocatable Configuration doesn't reserve more than the node capacity. -// Returns an error if the configuration is invalid, nil otherwise. -func (cm *containerManagerImpl) validateNodeAllocatable() error { - var errors []string - nar := cm.GetNodeAllocatableReservation() - for k, v := range nar { - value := cm.capacity[k].DeepCopy() - value.Sub(v) - - if value.Sign() < 0 { - errors = append(errors, fmt.Sprintf("Resource %q has a reservation of %v but capacity of %v. Expected capacity >= reservation.", k, v, cm.capacity[k])) - } - } - - if len(errors) > 0 { - return fmt.Errorf("invalid Node Allocatable configuration. %s", strings.Join(errors, " ")) - } - return nil -} - -// Using ObjectReference for events as the node may not be cached; refer to #42701 for details.
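// Tying the helpers above together: allocatable is capacity minus the reservations,
// floored at zero, while the reservation reported to the scheduler additionally folds
// in hard-eviction thresholds. A toy sketch of just the subtraction, using plain byte
// counts instead of resource.Quantity (all numbers hypothetical):

package main

import "fmt"

func allocatable(capacity, systemReserved, kubeReserved int64) int64 {
	v := capacity - systemReserved - kubeReserved
	if v < 0 {
		return 0 // negative allocatable makes no sense; clamp as the kubelet does
	}
	return v
}

func main() {
	// 8 GiB node, 512 MiB system-reserved, 1 GiB kube-reserved.
	fmt.Println(allocatable(8<<30, 512<<20, 1<<30)) // 6979321856 (~6.5 GiB)
}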
-func nodeRefFromNode(nodeName string) *v1.ObjectReference { - return &v1.ObjectReference{ - Kind: "Node", - Name: nodeName, - UID: types.UID(nodeName), - Namespace: "", - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go deleted file mode 100644 index 3159e0ed7fa..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go +++ /dev/null @@ -1,362 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "errors" - "fmt" - "os" - "path" - "strings" - - libcontainercgroups "github.com/opencontainers/cgroups" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" - kubefeatures "k8s.io/kubernetes/pkg/features" -) - -const ( - podCgroupNamePrefix = "pod" -) - -// podContainerManagerImpl implements the podContainerManager interface. -// It is the general implementation which allows pod level container -// management if the qos Cgroup is enabled. -type podContainerManagerImpl struct { - // qosContainersInfo holds absolute paths of the top level qos containers - qosContainersInfo QOSContainersInfo - // Stores the mounted cgroup subsystems - subsystems *CgroupSubsystems - // cgroupManager is the cgroup Manager Object responsible for managing all - // pod cgroups. - cgroupManager CgroupManager - // Maximum number of pids in a pod - podPidsLimit int64 - // enforceCPULimits controls whether cfs quota is enforced or not - enforceCPULimits bool - // cpuCFSQuotaPeriod is the cfs period value, cfs_period_us, setting per - // node for all containers in usec - cpuCFSQuotaPeriod uint64 - // podContainerManager is the ContainerManager running on the machine - podContainerManager ContainerManager -} - -// Make sure that podContainerManagerImpl implements the PodContainerManager interface -var _ PodContainerManager = &podContainerManagerImpl{} - -// Exists checks if the pod's cgroup already exists -func (m *podContainerManagerImpl) Exists(pod *v1.Pod) bool { - podContainerName, _ := m.GetPodContainerName(pod) - return m.cgroupManager.Exists(podContainerName) -} - -// EnsureExists takes a pod as argument and makes sure that -// the pod cgroup exists if the qos cgroup hierarchy flag is enabled. -// If the pod level container doesn't already exist, it is created.
-func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error { - // check if the container already exists - alreadyExists := m.Exists(pod) - if !alreadyExists { - enforceCPULimits := m.enforceCPULimits - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.podContainerManager.PodHasExclusiveCPUs(pod) { - klog.V(2).InfoS("Disabled CFS quota", "pod", klog.KObj(pod)) - enforceCPULimits = false - } - enforceMemoryQoS := false - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) && - libcontainercgroups.IsCgroup2UnifiedMode() { - enforceMemoryQoS = true - } - // Create the pod container - podContainerName, _ := m.GetPodContainerName(pod) - containerConfig := &CgroupConfig{ - Name: podContainerName, - ResourceParameters: ResourceConfigForPod(pod, enforceCPULimits, m.cpuCFSQuotaPeriod, enforceMemoryQoS), - } - if m.podPidsLimit > 0 { - containerConfig.ResourceParameters.PidsLimit = &m.podPidsLimit - } - if enforceMemoryQoS { - klog.V(4).InfoS("MemoryQoS config for pod", "pod", klog.KObj(pod), "unified", containerConfig.ResourceParameters.Unified) - } - if err := m.cgroupManager.Create(containerConfig); err != nil { - return fmt.Errorf("failed to create container for %v : %v", podContainerName, err) - } - } - return nil -} - -// GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host. -func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) { - podQOS := v1qos.GetPodQOS(pod) - // Get the parent QOS container name - var parentContainer CgroupName - switch podQOS { - case v1.PodQOSGuaranteed: - parentContainer = m.qosContainersInfo.Guaranteed - case v1.PodQOSBurstable: - parentContainer = m.qosContainersInfo.Burstable - case v1.PodQOSBestEffort: - parentContainer = m.qosContainersInfo.BestEffort - } - podContainer := GetPodCgroupNameSuffix(pod.UID) - - // Get the absolute path of the cgroup - cgroupName := NewCgroupName(parentContainer, podContainer) - // Get the literal cgroupfs name - cgroupfsName := m.cgroupManager.Name(cgroupName) - - return cgroupName, cgroupfsName -} - -func (m *podContainerManagerImpl) GetPodCgroupMemoryUsage(pod *v1.Pod) (uint64, error) { - podCgroupName, _ := m.GetPodContainerName(pod) - memUsage, err := m.cgroupManager.MemoryUsage(podCgroupName) - if err != nil { - return 0, err - } - return uint64(memUsage), nil -} - -func (m *podContainerManagerImpl) GetPodCgroupConfig(pod *v1.Pod, resource v1.ResourceName) (*ResourceConfig, error) { - podCgroupName, _ := m.GetPodContainerName(pod) - return m.cgroupManager.GetCgroupConfig(podCgroupName, resource) -} - -func (m *podContainerManagerImpl) SetPodCgroupConfig(pod *v1.Pod, resourceConfig *ResourceConfig) error { - podCgroupName, _ := m.GetPodContainerName(pod) - return m.cgroupManager.SetCgroupConfig(podCgroupName, resourceConfig) -} - -// Kill one process ID -func (m *podContainerManagerImpl) killOnePid(pid int) error { - // os.FindProcess never returns an error on POSIX - // https://go-review.googlesource.com/c/go/+/19093 - p, _ := os.FindProcess(pid) - if err := p.Kill(); err != nil { - // If the process already exited, that's fine.
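// A note on the naming scheme used by GetPodContainerName above: the pod cgroup is a
// "pod<UID>" leaf under the QoS parent. A rough sketch of the cgroupfs flavour of that
// derivation (the parent and UID below are made up; the systemd driver escapes every
// segment, which is why the real code round-trips through the CgroupManager):

package main

import (
	"fmt"
	"path"
)

// podCgroupfsPath joins a QoS parent with the pod's "pod<UID>" suffix.
func podCgroupfsPath(qosParent []string, podUID string) string {
	segments := append(append([]string{}, qosParent...), "pod"+podUID)
	return "/" + path.Join(segments...)
}

func main() {
	burstable := []string{"kubepods", "burstable"} // hypothetical parent
	fmt.Println(podCgroupfsPath(burstable, "8a2e1c9f")) // /kubepods/burstable/pod8a2e1c9f
}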
- if errors.Is(err, os.ErrProcessDone) { - klog.V(3).InfoS("Process no longer exists", "pid", pid) - return nil - } - return err - } - return nil -} - -// Scan through the whole cgroup directory and kill all processes either -// attached to the pod cgroup or to a container cgroup under the pod cgroup -func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName) error { - pidsToKill := m.cgroupManager.Pids(podCgroup) - // If no pids are charged to the terminated pod cgroup, return early - if len(pidsToKill) == 0 { - return nil - } - - var errlist []error - // os.Kill often errors out, - // so we try killing all the pids multiple times - removed := map[int]bool{} - for i := 0; i < 5; i++ { - if i != 0 { - klog.V(3).InfoS("Attempt failed to kill all unwanted process from cgroup, retrying", "attempt", i, "cgroupName", podCgroup) - } - errlist = []error{} - for _, pid := range pidsToKill { - if _, ok := removed[pid]; ok { - continue - } - klog.V(3).InfoS("Attempting to kill process from cgroup", "pid", pid, "cgroupName", podCgroup) - if err := m.killOnePid(pid); err != nil { - klog.V(3).InfoS("Failed to kill process from cgroup", "pid", pid, "cgroupName", podCgroup, "err", err) - errlist = append(errlist, err) - } else { - removed[pid] = true - } - } - if len(errlist) == 0 { - klog.V(3).InfoS("Successfully killed all unwanted processes from cgroup", "cgroupName", podCgroup) - return nil - } - } - return utilerrors.NewAggregate(errlist) -} - -// Destroy destroys the pod container cgroup paths -func (m *podContainerManagerImpl) Destroy(podCgroup CgroupName) error { - // Try killing all the processes attached to the pod cgroup - if err := m.tryKillingCgroupProcesses(podCgroup); err != nil { - klog.InfoS("Failed to kill all the processes attached to cgroup", "cgroupName", podCgroup, "err", err) - return fmt.Errorf("failed to kill all the processes attached to the %v cgroups : %v", podCgroup, err) - } - - // Now it's safe to remove the pod's cgroup - containerConfig := &CgroupConfig{ - Name: podCgroup, - ResourceParameters: &ResourceConfig{}, - } - if err := m.cgroupManager.Destroy(containerConfig); err != nil { - klog.InfoS("Failed to delete cgroup paths", "cgroupName", podCgroup, "err", err) - return fmt.Errorf("failed to delete cgroup paths for %v : %v", podCgroup, err) - } - return nil -} - -// ReduceCPULimits reduces the CPU CFS values to the minimum amount of shares.
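// The retry loop in tryKillingCgroupProcesses is a reusable pattern on its own: retry
// a batch operation a bounded number of times, remember per-item successes, and
// aggregate whatever failed on the last round. A condensed sketch, using errors.Join
// where the kubelet uses utilerrors.NewAggregate (the kill function is a stand-in):

package main

import (
	"errors"
	"fmt"
)

func killAll(pids []int, kill func(int) error) error {
	removed := map[int]bool{}
	var errs []error
	for attempt := 0; attempt < 5; attempt++ {
		errs = nil
		for _, pid := range pids {
			if removed[pid] {
				continue // already killed on an earlier round
			}
			if err := kill(pid); err != nil {
				errs = append(errs, err)
				continue
			}
			removed[pid] = true
		}
		if len(errs) == 0 {
			return nil
		}
	}
	return errors.Join(errs...)
}

func main() {
	failedOnce := false
	kill := func(pid int) error { // pid 2 fails once, then succeeds
		if pid == 2 && !failedOnce {
			failedOnce = true
			return fmt.Errorf("pid %d: busy", pid)
		}
		return nil
	}
	fmt.Println(killAll([]int{1, 2, 3}, kill)) // <nil>
}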
-func (m *podContainerManagerImpl) ReduceCPULimits(podCgroup CgroupName) error { - return m.cgroupManager.ReduceCPULimits(podCgroup) -} - -// IsPodCgroup returns true if the literal cgroupfs name corresponds to a pod -func (m *podContainerManagerImpl) IsPodCgroup(cgroupfs string) (bool, types.UID) { - // convert the literal cgroupfs form to the driver specific value - cgroupName := m.cgroupManager.CgroupName(cgroupfs) - qosContainersList := [3]CgroupName{m.qosContainersInfo.BestEffort, m.qosContainersInfo.Burstable, m.qosContainersInfo.Guaranteed} - basePath := "" - for _, qosContainerName := range qosContainersList { - // a pod cgroup is a direct child of a qos node, so check if it's a match - if len(cgroupName) == len(qosContainerName)+1 { - basePath = cgroupName[len(qosContainerName)] - } - } - if basePath == "" { - return false, types.UID("") - } - if !strings.HasPrefix(basePath, podCgroupNamePrefix) { - return false, types.UID("") - } - parts := strings.Split(basePath, podCgroupNamePrefix) - if len(parts) != 2 { - return false, types.UID("") - } - return true, types.UID(parts[1]) -} - -// GetAllPodsFromCgroups scans through all the subsystems of pod cgroups -// to get the list of pods whose cgroup still exists on the cgroup mounts -func (m *podContainerManagerImpl) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) { - // Map for storing all the found pods on the disk - foundPods := make(map[types.UID]CgroupName) - qosContainersList := [3]CgroupName{m.qosContainersInfo.BestEffort, m.qosContainersInfo.Burstable, m.qosContainersInfo.Guaranteed} - // Scan through all the subsystem mounts - // and through each QoS cgroup directory for each subsystem mount - // If a pod cgroup exists in even a single subsystem mount - // we will attempt to delete it - for _, val := range m.subsystems.MountPoints { - for _, qosContainerName := range qosContainersList { - // get the subsystems QoS cgroup absolute name - qcConversion := m.cgroupManager.Name(qosContainerName) - qc := path.Join(val, qcConversion) - dirInfo, err := os.ReadDir(qc) - if err != nil { - if os.IsNotExist(err) { - continue - } - return nil, fmt.Errorf("failed to read the cgroup directory %v : %v", qc, err) - } - for i := range dirInfo { - // it's not a directory, so continue on... - if !dirInfo[i].IsDir() { - continue - } - // convert the concrete cgroupfs name back to an internal identifier - // this is needed to handle path conversion for systemd environments. - // we pass the fully qualified path so decoding can work as expected - // since systemd encodes the path in each segment. - cgroupfsPath := path.Join(qcConversion, dirInfo[i].Name()) - internalPath := m.cgroupManager.CgroupName(cgroupfsPath) - // we only care about the base segment of the converted path since that - // is what we are reading currently to know if it is a pod or not. - basePath := internalPath[len(internalPath)-1] - if !strings.Contains(basePath, podCgroupNamePrefix) { - continue - } - // we then split the name on the pod prefix to determine the uid - parts := strings.Split(basePath, podCgroupNamePrefix) - // the uid is missing, so we log the unexpected cgroup that is not of the form pod - if len(parts) != 2 { - klog.InfoS("Pod cgroup manager ignored unexpected cgroup because it is not a pod", "path", cgroupfsPath) - continue - } - podUID := parts[1] - foundPods[types.UID(podUID)] = internalPath - } - } - } - return foundPods, nil -} - -// podContainerManagerNoop implements the podContainerManager interface.
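// The UID recovery that IsPodCgroup and GetAllPodsFromCgroups share can be exercised
// in isolation. A small sketch (sample names made up); note that, exactly as in the
// code above, a basename containing "pod" more than once splits into more than two
// parts and is rejected:

package main

import (
	"fmt"
	"strings"
)

func podUIDFromBasename(base string) (string, bool) {
	if !strings.HasPrefix(base, "pod") {
		return "", false
	}
	parts := strings.Split(base, "pod")
	if len(parts) != 2 {
		return "", false
	}
	return parts[1], true
}

func main() {
	fmt.Println(podUIDFromBasename("pod8a2e1c9f")) // 8a2e1c9f true
	fmt.Println(podUIDFromBasename("burstable"))   //  false
	fmt.Println(podUIDFromBasename("podpod123"))   //  false (three parts)
}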
-// It is a no-op implementation and basically does nothing -// podContainerManagerNoop is used in case the QoS cgroup Hierarchy is not -// enabled, so Exists() returns true always as the cgroupRoot -// is expected to always exist. -type podContainerManagerNoop struct { - cgroupRoot CgroupName -} - -// Make sure that podContainerManagerStub implements the PodContainerManager interface -var _ PodContainerManager = &podContainerManagerNoop{} - -func (m *podContainerManagerNoop) Exists(_ *v1.Pod) bool { - return true -} - -func (m *podContainerManagerNoop) EnsureExists(_ *v1.Pod) error { - return nil -} - -func (m *podContainerManagerNoop) GetPodContainerName(_ *v1.Pod) (CgroupName, string) { - return m.cgroupRoot, "" -} - -func (m *podContainerManagerNoop) GetPodContainerNameForDriver(_ *v1.Pod) string { - return "" -} - -// Destroy destroys the pod container cgroup paths -func (m *podContainerManagerNoop) Destroy(_ CgroupName) error { - return nil -} - -func (m *podContainerManagerNoop) ReduceCPULimits(_ CgroupName) error { - return nil -} - -func (m *podContainerManagerNoop) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) { - return nil, nil -} - -func (m *podContainerManagerNoop) IsPodCgroup(cgroupfs string) (bool, types.UID) { - return false, types.UID("") -} - -func (m *podContainerManagerNoop) GetPodCgroupMemoryUsage(_ *v1.Pod) (uint64, error) { - return 0, nil -} - -func (m *podContainerManagerNoop) GetPodCgroupConfig(_ *v1.Pod, _ v1.ResourceName) (*ResourceConfig, error) { - return nil, nil -} - -func (m *podContainerManagerNoop) SetPodCgroupConfig(_ *v1.Pod, _ *ResourceConfig) error { - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_stub.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_stub.go deleted file mode 100644 index 36a995c1895..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_stub.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cm - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" -) - -type podContainerManagerStub struct { -} - -var _ PodContainerManager = &podContainerManagerStub{} - -func (m *podContainerManagerStub) Exists(_ *v1.Pod) bool { - return true -} - -func (m *podContainerManagerStub) EnsureExists(_ *v1.Pod) error { - return nil -} - -func (m *podContainerManagerStub) GetPodContainerName(_ *v1.Pod) (CgroupName, string) { - return nil, "" -} - -func (m *podContainerManagerStub) Destroy(_ CgroupName) error { - return nil -} - -func (m *podContainerManagerStub) ReduceCPULimits(_ CgroupName) error { - return nil -} - -func (m *podContainerManagerStub) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) { - return nil, nil -} - -func (m *podContainerManagerStub) IsPodCgroup(cgroupfs string) (bool, types.UID) { - return false, types.UID("") -} - -func (m *podContainerManagerStub) GetPodCgroupMemoryUsage(_ *v1.Pod) (uint64, error) { - return 0, nil -} - -func (m *podContainerManagerStub) GetPodCgroupMemoryLimit(_ *v1.Pod) (uint64, error) { - return 0, nil -} - -func (m *podContainerManagerStub) GetPodCgroupCpuLimit(_ *v1.Pod) (int64, uint64, uint64, error) { - return 0, 0, 0, nil -} - -func (m *podContainerManagerStub) SetPodCgroupMemoryLimit(_ *v1.Pod, _ int64) error { - return nil -} - -func (m *podContainerManagerStub) SetPodCgroupCpuLimit(_ *v1.Pod, _ *int64, _, _ *uint64) error { - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go deleted file mode 100644 index aac639c1e65..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go +++ /dev/null @@ -1,407 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cm - -import ( - "fmt" - "strconv" - "strings" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/klog/v2" - - "k8s.io/apimachinery/pkg/util/wait" - - units "github.com/docker/go-units" - libcontainercgroups "github.com/opencontainers/cgroups" - utilfeature "k8s.io/apiserver/pkg/util/feature" - - "k8s.io/component-helpers/resource" - v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" - kubefeatures "k8s.io/kubernetes/pkg/features" -) - -const ( - // how often the qos cgroup manager will perform periodic update - // of the qos level cgroup resource constraints - periodicQOSCgroupUpdateInterval = 1 * time.Minute -) - -type QOSContainerManager interface { - Start(func() v1.ResourceList, ActivePodsFunc) error - GetQOSContainersInfo() QOSContainersInfo - UpdateCgroups() error -} - -type qosContainerManagerImpl struct { - sync.Mutex - qosContainersInfo QOSContainersInfo - subsystems *CgroupSubsystems - cgroupManager CgroupManager - activePods ActivePodsFunc - getNodeAllocatable func() v1.ResourceList - cgroupRoot CgroupName - qosReserved map[v1.ResourceName]int64 -} - -func NewQOSContainerManager(subsystems *CgroupSubsystems, cgroupRoot CgroupName, nodeConfig NodeConfig, cgroupManager CgroupManager) (QOSContainerManager, error) { - if !nodeConfig.CgroupsPerQOS { - return &qosContainerManagerNoop{ - cgroupRoot: cgroupRoot, - }, nil - } - - return &qosContainerManagerImpl{ - subsystems: subsystems, - cgroupManager: cgroupManager, - cgroupRoot: cgroupRoot, - qosReserved: nodeConfig.QOSReserved, - }, nil -} - -func (m *qosContainerManagerImpl) GetQOSContainersInfo() QOSContainersInfo { - return m.qosContainersInfo -} - -func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceList, activePods ActivePodsFunc) error { - cm := m.cgroupManager - rootContainer := m.cgroupRoot - - if err := cm.Validate(rootContainer); err != nil { - return fmt.Errorf("error validating root container %v : %w", rootContainer, err) - } - - // Top level for Qos containers are created only for Burstable - // and Best Effort classes - qosClasses := map[v1.PodQOSClass]CgroupName{ - v1.PodQOSBurstable: NewCgroupName(rootContainer, strings.ToLower(string(v1.PodQOSBurstable))), - v1.PodQOSBestEffort: NewCgroupName(rootContainer, strings.ToLower(string(v1.PodQOSBestEffort))), - } - - // Create containers for both qos classes - for qosClass, containerName := range qosClasses { - resourceParameters := &ResourceConfig{} - // the BestEffort QoS class has a statically configured minShares value - if qosClass == v1.PodQOSBestEffort { - minShares := uint64(MinShares) - resourceParameters.CPUShares = &minShares - } - - // containerConfig object stores the cgroup specifications - containerConfig := &CgroupConfig{ - Name: containerName, - ResourceParameters: resourceParameters, - } - - // for each enumerated huge page size, the qos tiers are unbounded - m.setHugePagesUnbounded(containerConfig) - - // check if it exists - if !cm.Exists(containerName) { - if err := cm.Create(containerConfig); err != nil { - return fmt.Errorf("failed to create top level %v QOS cgroup : %v", qosClass, err) - } - } else { - // to ensure we actually have the right state, we update the config on startup - if err := cm.Update(containerConfig); err != nil { - return fmt.Errorf("failed to update top level %v QOS cgroup : %v", qosClass, err) - } - } - } - // Store the top level qos container names - m.qosContainersInfo = QOSContainersInfo{ - Guaranteed: rootContainer, - Burstable: qosClasses[v1.PodQOSBurstable], - BestEffort: 
qosClasses[v1.PodQOSBestEffort], - } - m.getNodeAllocatable = getNodeAllocatable - m.activePods = activePods - - // update qos cgroup tiers on startup and in periodic intervals - // to ensure desired state is in sync with actual state. - go wait.Until(func() { - err := m.UpdateCgroups() - if err != nil { - klog.InfoS("Failed to reserve QoS requests", "err", err) - } - }, periodicQOSCgroupUpdateInterval, wait.NeverStop) - - return nil -} - -// setHugePagesUnbounded ensures hugetlb is effectively unbounded -func (m *qosContainerManagerImpl) setHugePagesUnbounded(cgroupConfig *CgroupConfig) error { - hugePageLimit := map[int64]int64{} - for _, pageSize := range libcontainercgroups.HugePageSizes() { - pageSizeBytes, err := units.RAMInBytes(pageSize) - if err != nil { - return err - } - hugePageLimit[pageSizeBytes] = int64(1 << 62) - } - cgroupConfig.ResourceParameters.HugePageLimit = hugePageLimit - return nil -} - -func (m *qosContainerManagerImpl) setHugePagesConfig(configs map[v1.PodQOSClass]*CgroupConfig) error { - for _, v := range configs { - if err := m.setHugePagesUnbounded(v); err != nil { - return err - } - } - return nil -} - -func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]*CgroupConfig) error { - pods := m.activePods() - burstablePodCPURequest := int64(0) - reuseReqs := make(v1.ResourceList, 4) - for i := range pods { - pod := pods[i] - qosClass := v1qos.GetPodQOS(pod) - if qosClass != v1.PodQOSBurstable { - // we only care about the burstable qos tier - continue - } - req := resource.PodRequests(pod, resource.PodResourcesOptions{ - Reuse: reuseReqs, - // SkipPodLevelResources is set to false when PodLevelResources feature is enabled. - SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources), - }) - if request, found := req[v1.ResourceCPU]; found { - burstablePodCPURequest += request.MilliValue() - } - } - - // make sure best effort is always 2 shares - bestEffortCPUShares := uint64(MinShares) - configs[v1.PodQOSBestEffort].ResourceParameters.CPUShares = &bestEffortCPUShares - - // set burstable shares based on current observe state - burstableCPUShares := MilliCPUToShares(burstablePodCPURequest) - configs[v1.PodQOSBurstable].ResourceParameters.CPUShares = &burstableCPUShares - return nil -} - -// getQoSMemoryRequests sums and returns the memory request of all pods for -// guaranteed and burstable qos classes. -func (m *qosContainerManagerImpl) getQoSMemoryRequests() map[v1.PodQOSClass]int64 { - qosMemoryRequests := map[v1.PodQOSClass]int64{ - v1.PodQOSGuaranteed: 0, - v1.PodQOSBurstable: 0, - } - - // Sum the pod limits for pods in each QOS class - pods := m.activePods() - reuseReqs := make(v1.ResourceList, 4) - for _, pod := range pods { - podMemoryRequest := int64(0) - qosClass := v1qos.GetPodQOS(pod) - if qosClass == v1.PodQOSBestEffort { - // limits are not set for Best Effort pods - continue - } - req := resource.PodRequests(pod, resource.PodResourcesOptions{Reuse: reuseReqs}) - if request, found := req[v1.ResourceMemory]; found { - podMemoryRequest += request.Value() - } - qosMemoryRequests[qosClass] += podMemoryRequest - } - - return qosMemoryRequests -} - -// setMemoryReserve sums the memory limits of all pods in a QOS class, -// calculates QOS class memory limits, and set those limits in the -// CgroupConfig for each QOS class. 
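// Following setHugePagesUnbounded above: every discovered huge page size is given an
// effectively unlimited hugetlb limit of 1<<62 bytes. A tiny sketch with the page
// sizes hardcoded (the real code discovers them via libcontainer):

package main

import "fmt"

func main() {
	limit := int64(1) << 62
	hugePageLimit := map[int64]int64{}
	for _, pageSize := range []int64{2 << 20, 1 << 30} { // 2Mi and 1Gi pages
		hugePageLimit[pageSize] = limit
	}
	fmt.Println(hugePageLimit) // map[2097152:4611686018427387904 1073741824:4611686018427387904]
}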
-func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*CgroupConfig, percentReserve int64) { - qosMemoryRequests := m.getQoSMemoryRequests() - - resources := m.getNodeAllocatable() - allocatableResource, ok := resources[v1.ResourceMemory] - if !ok { - klog.V(2).InfoS("Allocatable memory value could not be determined, not setting QoS memory limits") - return - } - allocatable := allocatableResource.Value() - if allocatable == 0 { - klog.V(2).InfoS("Allocatable memory reported as 0, might be in standalone mode, not setting QoS memory limits") - return - } - - for qos, limits := range qosMemoryRequests { - klog.V(2).InfoS("QoS pod memory limit", "qos", qos, "limits", limits, "percentReserve", percentReserve) - } - - // Calculate QOS memory limits - burstableLimit := allocatable - (qosMemoryRequests[v1.PodQOSGuaranteed] * percentReserve / 100) - bestEffortLimit := burstableLimit - (qosMemoryRequests[v1.PodQOSBurstable] * percentReserve / 100) - configs[v1.PodQOSBurstable].ResourceParameters.Memory = &burstableLimit - configs[v1.PodQOSBestEffort].ResourceParameters.Memory = &bestEffortLimit -} - -// retrySetMemoryReserve checks for any QoS cgroups over the limit -// that was attempted to be set in the first Update() and adjusts -// their memory limit to the usage to prevent further growth. -func (m *qosContainerManagerImpl) retrySetMemoryReserve(configs map[v1.PodQOSClass]*CgroupConfig, percentReserve int64) { - // Unreclaimable memory usage may have already exceeded the desired limit. - // Attempt to set the limit near the current usage to put pressure - // on the cgroup and prevent further growth. - for qos, config := range configs { - usage, err := m.cgroupManager.MemoryUsage(config.Name) - if err != nil { - klog.V(2).InfoS("Failed to get resource stats", "err", err) - return - } - - // Because there is no good way to determine if the original Update() - // on the memory resource was successful, we determine failure of the - // first attempt by checking if the usage is above the limit we attempt - // to set. If it is, we assume the first attempt to set the limit failed - // and try again setting the limit to the usage. Otherwise we leave - // the CgroupConfig as is. - if configs[qos].ResourceParameters.Memory != nil && usage > *configs[qos].ResourceParameters.Memory { - configs[qos].ResourceParameters.Memory = &usage - } - } -} - -// setMemoryQoS sums the memory requests of all pods in the Burstable class, -// and sets the sum as the memory.min in the Unified field of CgroupConfig.
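// The QOSReserved arithmetic in setMemoryReserve, worked with made-up numbers:
// reserving 100% means guaranteed requests are carved out of the burstable tier's
// limit, and burstable requests are carved out of best-effort's limit on top of that:

package main

import "fmt"

func main() {
	allocatable := int64(8 << 30)   // 8 GiB allocatable memory (hypothetical)
	guaranteedReq := int64(2 << 30) // sum of guaranteed pod memory requests
	burstableReq := int64(1 << 30)  // sum of burstable pod memory requests
	percentReserve := int64(100)

	burstableLimit := allocatable - guaranteedReq*percentReserve/100
	bestEffortLimit := burstableLimit - burstableReq*percentReserve/100
	fmt.Println(burstableLimit>>30, bestEffortLimit>>30) // 6 5 (GiB)
}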
-func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*CgroupConfig) { - qosMemoryRequests := m.getQoSMemoryRequests() - - // Calculate the memory.min: - // for burstable(/kubepods/burstable): sum of all burstable pods - // for guaranteed(/kubepods): sum of all guaranteed and burstable pods - burstableMin := qosMemoryRequests[v1.PodQOSBurstable] - guaranteedMin := qosMemoryRequests[v1.PodQOSGuaranteed] + burstableMin - - if burstableMin > 0 { - if configs[v1.PodQOSBurstable].ResourceParameters.Unified == nil { - configs[v1.PodQOSBurstable].ResourceParameters.Unified = make(map[string]string) - } - configs[v1.PodQOSBurstable].ResourceParameters.Unified[Cgroup2MemoryMin] = strconv.FormatInt(burstableMin, 10) - klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memoryMin", burstableMin) - } - - if guaranteedMin > 0 { - if configs[v1.PodQOSGuaranteed].ResourceParameters.Unified == nil { - configs[v1.PodQOSGuaranteed].ResourceParameters.Unified = make(map[string]string) - } - configs[v1.PodQOSGuaranteed].ResourceParameters.Unified[Cgroup2MemoryMin] = strconv.FormatInt(guaranteedMin, 10) - klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memoryMin", guaranteedMin) - } -} - -func (m *qosContainerManagerImpl) UpdateCgroups() error { - m.Lock() - defer m.Unlock() - - qosConfigs := map[v1.PodQOSClass]*CgroupConfig{ - v1.PodQOSGuaranteed: { - Name: m.qosContainersInfo.Guaranteed, - ResourceParameters: &ResourceConfig{}, - }, - v1.PodQOSBurstable: { - Name: m.qosContainersInfo.Burstable, - ResourceParameters: &ResourceConfig{}, - }, - v1.PodQOSBestEffort: { - Name: m.qosContainersInfo.BestEffort, - ResourceParameters: &ResourceConfig{}, - }, - } - - // update the qos level cgroup settings for cpu shares - if err := m.setCPUCgroupConfig(qosConfigs); err != nil { - return err - } - - // update the qos level cgroup settings for huge pages (ensure they remain unbounded) - if err := m.setHugePagesConfig(qosConfigs); err != nil { - return err - } - - // update the qos level cgroups v2 settings of memory qos if the feature is enabled - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) && - libcontainercgroups.IsCgroup2UnifiedMode() { - m.setMemoryQoS(qosConfigs) - } - - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.QOSReserved) { - for resource, percentReserve := range m.qosReserved { - switch resource { - case v1.ResourceMemory: - m.setMemoryReserve(qosConfigs, percentReserve) - } - } - - updateSuccess := true - for _, config := range qosConfigs { - err := m.cgroupManager.Update(config) - if err != nil { - updateSuccess = false - } - } - if updateSuccess { - klog.V(4).InfoS("Updated QoS cgroup configuration") - return nil - } - - // If the resource can adjust the ResourceConfig to increase likelihood of - // success, call the adjustment function here. Otherwise, the Update() will - // be called again with the same values.
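// The memory.min assignment in setMemoryQoS, reduced to its arithmetic: the burstable
// tier is guaranteed the burstable request sum, and the guaranteed tier (the kubepods
// root) is guaranteed the guaranteed plus burstable sums. A sketch with hypothetical
// request totals:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	burstableReq := int64(1 << 30)
	guaranteedReq := int64(2 << 30)

	unified := map[string]map[string]string{
		"burstable":  {"memory.min": strconv.FormatInt(burstableReq, 10)},
		"guaranteed": {"memory.min": strconv.FormatInt(guaranteedReq+burstableReq, 10)},
	}
	fmt.Println(unified["guaranteed"]["memory.min"]) // 3221225472 (3 GiB)
}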
- for resource, percentReserve := range m.qosReserved { - switch resource { - case v1.ResourceMemory: - m.retrySetMemoryReserve(qosConfigs, percentReserve) - } - } - } - - for _, config := range qosConfigs { - err := m.cgroupManager.Update(config) - if err != nil { - klog.ErrorS(err, "Failed to update QoS cgroup configuration") - return err - } - } - - klog.V(4).InfoS("Updated QoS cgroup configuration") - return nil -} - -type qosContainerManagerNoop struct { - cgroupRoot CgroupName -} - -var _ QOSContainerManager = &qosContainerManagerNoop{} - -func (m *qosContainerManagerNoop) GetQOSContainersInfo() QOSContainersInfo { - return QOSContainersInfo{} -} - -func (m *qosContainerManagerNoop) Start(_ func() v1.ResourceList, _ ActivePodsFunc) error { - return nil -} - -func (m *qosContainerManagerNoop) UpdateCgroups() error { - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates/updates.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates/updates.go deleted file mode 100644 index 3bf0155a3e5..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates/updates.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourceupdates - -// Update is a struct that represents an update to a pod when -// the resource changes its status. -// Later we may need to add fields like container name, resource name, and a new status. -type Update struct { - // PodUIDs are the UIDs of the pods whose status needs to be updated. - PodUIDs []string -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/OWNERS deleted file mode 100644 index f0df77f40c4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - derekwaynecarr - - klueska -reviewers: [] -emeritus_approvers: - - ConnorDoyle - - lmdaly diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go deleted file mode 100644 index 3d029adb691..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go +++ /dev/null @@ -1,222 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package bitmask - -import ( - "fmt" - "math/bits" - "strconv" -) - -// BitMask interface allows hint providers to create BitMasks for TopologyHints -type BitMask interface { - Add(bits ...int) error - Remove(bits ...int) error - And(masks ...BitMask) - Or(masks ...BitMask) - Clear() - Fill() - IsEqual(mask BitMask) bool - IsEmpty() bool - IsSet(bit int) bool - AnySet(bits []int) bool - IsNarrowerThan(mask BitMask) bool - IsLessThan(mask BitMask) bool - IsGreaterThan(mask BitMask) bool - String() string - Count() int - GetBits() []int -} - -type bitMask uint64 - -// NewEmptyBitMask creates a new, empty BitMask -func NewEmptyBitMask() BitMask { - s := bitMask(0) - return &s -} - -// NewBitMask creates a new BitMask -func NewBitMask(bits ...int) (BitMask, error) { - s := bitMask(0) - err := (&s).Add(bits...) - if err != nil { - return nil, err - } - return &s, nil -} - -// Add adds the bits with topology affinity to the BitMask -func (s *bitMask) Add(bits ...int) error { - mask := *s - for _, i := range bits { - if i < 0 || i >= 64 { - return fmt.Errorf("bit number must be in range 0-63") - } - mask |= 1 << uint64(i) - } - *s = mask - return nil -} - -// Remove removes specified bits from BitMask -func (s *bitMask) Remove(bits ...int) error { - mask := *s - for _, i := range bits { - if i < 0 || i >= 64 { - return fmt.Errorf("bit number must be in range 0-63") - } - mask &^= 1 << uint64(i) - } - *s = mask - return nil -} - -// And performs an AND operation on all bits in masks -func (s *bitMask) And(masks ...BitMask) { - for _, m := range masks { - *s &= *m.(*bitMask) - } -} - -// Or performs an OR operation on all bits in masks -func (s *bitMask) Or(masks ...BitMask) { - for _, m := range masks { - *s |= *m.(*bitMask) - } -} - -// Clear resets all bits in mask to zero -func (s *bitMask) Clear() { - *s = 0 -} - -// Fill sets all bits in mask to one -func (s *bitMask) Fill() { - *s = bitMask(^uint64(0)) -} - -// IsEmpty checks mask to see if all bits are zero -func (s *bitMask) IsEmpty() bool { - return *s == 0 -} - -// IsSet checks bit in mask to see if bit is set to one -func (s *bitMask) IsSet(bit int) bool { - if bit < 0 || bit >= 64 { - return false - } - return (*s & (1 << uint64(bit))) > 0 -} - -// AnySet checks bit in mask to see if any provided bit is set to one -func (s *bitMask) AnySet(bits []int) bool { - for _, b := range bits { - if s.IsSet(b) { - return true - } - } - return false -} - -// IsEqual checks if masks are equal -func (s *bitMask) IsEqual(mask BitMask) bool { - return *s == *mask.(*bitMask) } - -// IsNarrowerThan checks if one mask is narrower than another. -// -// A mask is said to be "narrower" than another if it has fewer bits set. If the -// same number of bits are set in both masks, then the mask with more -// lower-numbered bits set wins out. -func (s *bitMask) IsNarrowerThan(mask BitMask) bool { - if s.Count() == mask.Count() { - return s.IsLessThan(mask) - } - return s.Count() < mask.Count() -} - -// IsLessThan checks which bitmask has more lower-numbered bits set. -func (s *bitMask) IsLessThan(mask BitMask) bool { - return *s < *mask.(*bitMask) -} - -// IsGreaterThan checks which bitmask has more higher-numbered bits set.
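// The bitmask above is a uint64 underneath, so the interesting operations reduce to
// shifts and popcount. A few of them spelled out, including the "narrower" rule
// (fewer bits set wins; equal counts fall back to the numeric comparison, which
// favours lower-numbered bits):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var a, b uint64
	a |= 1 << 0 // NUMA node 0
	a |= 1 << 1 // NUMA node 1
	b |= 1 << 2 // NUMA node 2

	fmt.Println(bits.OnesCount64(a), bits.OnesCount64(b)) // 2 1
	narrower := bits.OnesCount64(b) < bits.OnesCount64(a) ||
		(bits.OnesCount64(b) == bits.OnesCount64(a) && b < a)
	fmt.Println(narrower) // true: b is narrower than a
	fmt.Println(a & b)    // 0: the masks share no NUMA node
}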
-func (s *bitMask) IsGreaterThan(mask BitMask) bool { - return *s > *mask.(*bitMask) -} - -// String converts mask to string -func (s *bitMask) String() string { - grouping := 2 - for shift := 64 - grouping; shift > 0; shift -= grouping { - if *s > (1 << uint(shift)) { - return fmt.Sprintf("%0"+strconv.Itoa(shift+grouping)+"b", *s) - } - } - return fmt.Sprintf("%0"+strconv.Itoa(grouping)+"b", *s) -} - -// Count counts number of bits in mask set to one -func (s *bitMask) Count() int { - return bits.OnesCount64(uint64(*s)) -} - -// Getbits returns each bit number with bits set to one -func (s *bitMask) GetBits() []int { - var bits []int - for i := uint64(0); i < 64; i++ { - if (*s & (1 << i)) > 0 { - bits = append(bits, int(i)) - } - } - return bits -} - -// And is a package level implementation of 'and' between first and masks -func And(first BitMask, masks ...BitMask) BitMask { - s := *first.(*bitMask) - s.And(masks...) - return &s -} - -// Or is a package level implementation of 'or' between first and masks -func Or(first BitMask, masks ...BitMask) BitMask { - s := *first.(*bitMask) - s.Or(masks...) - return &s -} - -// IterateBitMasks iterates all possible masks from a list of bits, -// issuing a callback on each mask. -func IterateBitMasks(bits []int, callback func(BitMask)) { - var iterate func(bits, accum []int, size int) - iterate = func(bits, accum []int, size int) { - if len(accum) == size { - mask, _ := NewBitMask(accum...) - callback(mask) - return - } - for i := range bits { - iterate(bits[i+1:], append(accum, bits[i]), size) - } - } - - for i := 1; i <= len(bits); i++ { - iterate(bits, []int{}, i) - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go deleted file mode 100644 index 19b07a719ca..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package topologymanager - -import ( - "k8s.io/api/core/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/admission" - "k8s.io/kubernetes/pkg/kubelet/lifecycle" -) - -type fakeManager struct { - hint *TopologyHint - policy Policy -} - -// NewFakeManager returns an instance of FakeManager -func NewFakeManager() Manager { - klog.InfoS("NewFakeManager") - return &fakeManager{} -} - -// NewFakeManagerWithHint returns an instance of fake topology manager with specified topology hints -func NewFakeManagerWithHint(hint *TopologyHint) Manager { - klog.InfoS("NewFakeManagerWithHint") - return &fakeManager{ - hint: hint, - policy: NewNonePolicy(), - } -} - -// NewFakeManagerWithPolicy returns an instance of fake topology manager with specified policy -func NewFakeManagerWithPolicy(policy Policy) Manager { - klog.InfoS("NewFakeManagerWithPolicy", "policy", policy.Name()) - return &fakeManager{ - policy: policy, - } -} - -func (m *fakeManager) GetAffinity(podUID string, containerName string) TopologyHint { - klog.InfoS("GetAffinity", "podUID", podUID, "containerName", containerName) - if m.hint == nil { - return TopologyHint{} - } - - return *m.hint -} - -func (m *fakeManager) GetPolicy() Policy { - return m.policy -} - -func (m *fakeManager) AddHintProvider(h HintProvider) { - klog.InfoS("AddHintProvider", "hintProvider", h) -} - -func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { - klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID) -} - -func (m *fakeManager) RemoveContainer(containerID string) error { - klog.InfoS("RemoveContainer", "containerID", containerID) - return nil -} - -func (m *fakeManager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { - klog.InfoS("Topology Admit Handler") - return admission.GetPodAdmitResult(nil) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/numa_info.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/numa_info.go deleted file mode 100644 index c6977266742..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/numa_info.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package topologymanager - -import ( - "fmt" - - cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" -) - -type NUMADistances map[int][]uint64 - -type NUMAInfo struct { - Nodes []int - NUMADistances NUMADistances -} - -func NewNUMAInfo(topology []cadvisorapi.Node, opts PolicyOptions) (*NUMAInfo, error) { - var numaNodes []int - distances := map[int][]uint64{} - for _, node := range topology { - numaNodes = append(numaNodes, node.Id) - - var nodeDistance []uint64 - if opts.PreferClosestNUMA { - nodeDistance = node.Distances - if nodeDistance == nil { - return nil, fmt.Errorf("error getting NUMA distances from cadvisor") - } - } - distances[node.Id] = nodeDistance - } - - numaInfo := &NUMAInfo{ - Nodes: numaNodes, - NUMADistances: distances, - } - - return numaInfo, nil -} - -func (n *NUMAInfo) Narrowest(m1 bitmask.BitMask, m2 bitmask.BitMask) bitmask.BitMask { - if m1.IsNarrowerThan(m2) { - return m1 - } - return m2 -} - -func (n *NUMAInfo) Closest(m1 bitmask.BitMask, m2 bitmask.BitMask) bitmask.BitMask { - // If the length of both bitmasks aren't the same, choose the one that is narrowest. - if m1.Count() != m2.Count() { - return n.Narrowest(m1, m2) - } - - m1Distance := n.NUMADistances.CalculateAverageFor(m1) - m2Distance := n.NUMADistances.CalculateAverageFor(m2) - // If average distance is the same, take bitmask with more lower-number bits set. - if m1Distance == m2Distance { - if m1.IsLessThan(m2) { - return m1 - } - return m2 - } - - // Otherwise, return the bitmask with the shortest average distance between NUMA nodes. - if m1Distance < m2Distance { - return m1 - } - - return m2 -} - -func (n NUMAInfo) DefaultAffinityMask() bitmask.BitMask { - defaultAffinity, _ := bitmask.NewBitMask(n.Nodes...) - return defaultAffinity -} - -func (d NUMADistances) CalculateAverageFor(bm bitmask.BitMask) float64 { - // This should never happen, but just in case make sure we do not divide by zero. - if bm.Count() == 0 { - return 0 - } - - var count float64 = 0 - var sum float64 = 0 - for _, node1 := range bm.GetBits() { - for _, node2 := range bm.GetBits() { - sum += float64(d[node1][node2]) - count++ - } - } - - return sum / count -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go deleted file mode 100644 index 80e9292ae41..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy.go +++ /dev/null @@ -1,361 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
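// CalculateAverageFor above averages the distance over every ordered pair of nodes in
// the mask, a node paired with itself included. Worked by hand for a two-node mask on
// a machine with the classic distance matrix (10 local, 20 remote):

package main

import "fmt"

func main() {
	distances := map[int][]uint64{
		0: {10, 20},
		1: {20, 10},
	}
	nodes := []int{0, 1} // the bits set in the mask

	var sum, count float64
	for _, n1 := range nodes {
		for _, n2 := range nodes {
			sum += float64(distances[n1][n2])
			count++
		}
	}
	fmt.Println(sum / count) // (10+20+20+10)/4 = 15
}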
-*/ - -package topologymanager - -import ( - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" -) - -// Policy interface for Topology Manager Pod Admit Result -type Policy interface { - // Returns Policy Name - Name() string - // Returns a merged TopologyHint based on input from hint providers - // and a Pod Admit Handler Response based on hints and policy type - Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) -} - -// IsAlignmentGuaranteed returns true if the given policy guarantees that either -// the compute resources will be allocated within a NUMA boundary, or the allocation will fail. -func IsAlignmentGuaranteed(p Policy) bool { - // We are abusing the name, but at the moment this matches almost 1:1 the policy name - // so we are not adding new fields for now. - return p.Name() == PolicySingleNumaNode -} - -// Merge a TopologyHints permutation to a single hint by performing a bitwise-AND -// of their affinity masks. The hint shall be preferred if all hints in the permutation -// are preferred. -func mergePermutation(defaultAffinity bitmask.BitMask, permutation []TopologyHint) TopologyHint { - // Get the NUMANodeAffinity from each hint in the permutation and see if any - // of them encode unpreferred allocations. - preferred := true - var numaAffinities []bitmask.BitMask - for _, hint := range permutation { - // Only consider hints that have an actual NUMANodeAffinity set. - if hint.NUMANodeAffinity != nil { - numaAffinities = append(numaAffinities, hint.NUMANodeAffinity) - // Only mark preferred if all affinities are equal. - if !hint.NUMANodeAffinity.IsEqual(numaAffinities[0]) { - preferred = false - } - } - // Only mark preferred if all affinities are preferred. - if !hint.Preferred { - preferred = false - } - } - - // Merge the affinities using a bitwise-and operation. - mergedAffinity := bitmask.And(defaultAffinity, numaAffinities...) - // Build a mergedHint from the merged affinity mask, setting preferred as - // appropriate based on the logic above. - return TopologyHint{mergedAffinity, preferred} -} - -func filterProvidersHints(providersHints []map[string][]TopologyHint) [][]TopologyHint { - // Loop through all hint providers and save an accumulated list of the - // hints returned by each hint provider. If no hints are provided, assume - // that provider has no preference for topology-aware allocation. - var allProviderHints [][]TopologyHint - for _, hints := range providersHints { - // If hints is nil, insert a single, preferred any-numa hint into allProviderHints. - if len(hints) == 0 { - klog.InfoS("Hint Provider has no preference for NUMA affinity with any resource") - allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}}) - continue - } - - // Otherwise, accumulate the hints for each resource type into allProviderHints.
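// mergePermutation in miniature: the merged affinity is the bitwise AND of the
// default (all-nodes) mask with every hint's mask, and the result stays preferred
// only while every input hint is preferred. This sketch deliberately omits the extra
// rule that differing affinities among the hints also clear the preferred flag:

package main

import "fmt"

func main() {
	defaultAffinity := uint64(0b11) // a two-NUMA-node machine
	hints := []struct {
		mask      uint64
		preferred bool
	}{
		{0b01, true}, // provider A: node 0 only
		{0b11, true}, // provider B: either node
	}

	merged, preferred := defaultAffinity, true
	for _, h := range hints {
		merged &= h.mask
		preferred = preferred && h.preferred
	}
	fmt.Printf("%02b %v\n", merged, preferred) // 01 true
}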
- for resource := range hints { - if hints[resource] == nil { - klog.InfoS("Hint Provider has no preference for NUMA affinity with resource", "resource", resource) - allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}}) - continue - } - - if len(hints[resource]) == 0 { - klog.InfoS("Hint Provider has no possible NUMA affinities for resource", "resource", resource) - allProviderHints = append(allProviderHints, []TopologyHint{{nil, false}}) - continue - } - - allProviderHints = append(allProviderHints, hints[resource]) - } - } - return allProviderHints -} - -func narrowestHint(hints []TopologyHint) *TopologyHint { - if len(hints) == 0 { - return nil - } - var narrowestHint *TopologyHint - for i := range hints { - if hints[i].NUMANodeAffinity == nil { - continue - } - if narrowestHint == nil { - narrowestHint = &hints[i] - } - if hints[i].NUMANodeAffinity.IsNarrowerThan(narrowestHint.NUMANodeAffinity) { - narrowestHint = &hints[i] - } - } - return narrowestHint -} - -func maxOfMinAffinityCounts(filteredHints [][]TopologyHint) int { - maxOfMinCount := 0 - for _, resourceHints := range filteredHints { - narrowestHint := narrowestHint(resourceHints) - if narrowestHint == nil { - continue - } - if narrowestHint.NUMANodeAffinity.Count() > maxOfMinCount { - maxOfMinCount = narrowestHint.NUMANodeAffinity.Count() - } - } - return maxOfMinCount -} - -type HintMerger struct { - NUMAInfo *NUMAInfo - Hints [][]TopologyHint - // Set bestNonPreferredAffinityCount to help decide which affinity mask is - // preferred amongst all non-preferred hints. We calculate this value as - // the maximum of the minimum affinity counts supplied for any given hint - // provider. In other words, prefer a hint that has an affinity mask that - // includes all of the NUMA nodes from the provider that requires the most - // NUMA nodes to satisfy its allocation. - BestNonPreferredAffinityCount int - CompareNUMAAffinityMasks func(candidate *TopologyHint, current *TopologyHint) (best *TopologyHint) -} - -func NewHintMerger(numaInfo *NUMAInfo, hints [][]TopologyHint, policyName string, opts PolicyOptions) HintMerger { - compareNumaAffinityMasks := func(current, candidate *TopologyHint) *TopologyHint { - // If current and candidate bitmasks are the same, prefer current hint. - if candidate.NUMANodeAffinity.IsEqual(current.NUMANodeAffinity) { - return current - } - - // Otherwise compare the hints, based on the policy options provided - var best bitmask.BitMask - if (policyName != PolicySingleNumaNode) && opts.PreferClosestNUMA { - best = numaInfo.Closest(current.NUMANodeAffinity, candidate.NUMANodeAffinity) - } else { - best = numaInfo.Narrowest(current.NUMANodeAffinity, candidate.NUMANodeAffinity) - } - if best.IsEqual(current.NUMANodeAffinity) { - return current - } - return candidate - } - - merger := HintMerger{ - NUMAInfo: numaInfo, - Hints: hints, - BestNonPreferredAffinityCount: maxOfMinAffinityCounts(hints), - CompareNUMAAffinityMasks: compareNumaAffinityMasks, - } - - return merger -} - -func (m HintMerger) compare(current *TopologyHint, candidate *TopologyHint) *TopologyHint { - // Only consider candidates that result in a NUMANodeAffinity > 0 to - // replace the current bestHint. - if candidate.NUMANodeAffinity.Count() == 0 { - return current - } - - // If no current bestHint is set, return the candidate as the bestHint. 
- if current == nil { - return candidate - } - - // If the current bestHint is non-preferred and the candidate hint is - // preferred, always choose the preferred hint over the non-preferred one. - if !current.Preferred && candidate.Preferred { - return candidate - } - - // If the current bestHint is preferred and the candidate hint is - // non-preferred, never update the bestHint, regardless of how - // the candidate hint's affinity mask compares to the current - // hint's affinity mask. - if current.Preferred && !candidate.Preferred { - return current - } - - // If the current bestHint and the candidate hint are both preferred, - // then only consider fitter NUMANodeAffinity - if current.Preferred && candidate.Preferred { - return m.CompareNUMAAffinityMasks(current, candidate) - - } - - // The only case left is if the current best bestHint and the candidate - // hint are both non-preferred. In this case, try and find a hint whose - // affinity count is as close to (but not higher than) the - // bestNonPreferredAffinityCount as possible. To do this we need to - // consider the following cases and react accordingly: - // - // 1. current.NUMANodeAffinity.Count() > bestNonPreferredAffinityCount - // 2. current.NUMANodeAffinity.Count() == bestNonPreferredAffinityCount - // 3. current.NUMANodeAffinity.Count() < bestNonPreferredAffinityCount - // - // For case (1), the current bestHint is larger than the - // bestNonPreferredAffinityCount, so updating to fitter mergeHint - // is preferred over staying where we are. - // - // For case (2), the current bestHint is equal to the - // bestNonPreferredAffinityCount, so we would like to stick with what - // we have *unless* the candidate hint is also equal to - // bestNonPreferredAffinityCount and it is fitter. - // - // For case (3), the current bestHint is less than - // bestNonPreferredAffinityCount, so we would like to creep back up to - // bestNonPreferredAffinityCount as close as we can. There are three - // cases to consider here: - // - // 3a. candidate.NUMANodeAffinity.Count() > bestNonPreferredAffinityCount - // 3b. candidate.NUMANodeAffinity.Count() == bestNonPreferredAffinityCount - // 3c. candidate.NUMANodeAffinity.Count() < bestNonPreferredAffinityCount - // - // For case (3a), we just want to stick with the current bestHint - // because choosing a new hint that is greater than - // bestNonPreferredAffinityCount would be counter-productive. - // - // For case (3b), we want to immediately update bestHint to the - // candidate hint, making it now equal to bestNonPreferredAffinityCount. - // - // For case (3c), we know that *both* the current bestHint and the - // candidate hint are less than bestNonPreferredAffinityCount, so we - // want to choose one that brings us back up as close to - // bestNonPreferredAffinityCount as possible. There are three cases to - // consider here: - // - // 3ca. candidate.NUMANodeAffinity.Count() > current.NUMANodeAffinity.Count() - // 3cb. candidate.NUMANodeAffinity.Count() < current.NUMANodeAffinity.Count() - // 3cc. candidate.NUMANodeAffinity.Count() == current.NUMANodeAffinity.Count() - // - // For case (3ca), we want to immediately update bestHint to the - // candidate hint because that will bring us closer to the (higher) - // value of bestNonPreferredAffinityCount. - // - // For case (3cb), we want to stick with the current bestHint because - // choosing the candidate hint would strictly move us further away from - // the bestNonPreferredAffinityCount. 
-	//
-	// Finally, for case (3cc), we know that the current bestHint and the
-	// candidate hint are equal, so we simply choose the fitter of the 2.
-
-	// Case 1
-	if current.NUMANodeAffinity.Count() > m.BestNonPreferredAffinityCount {
-		return m.CompareNUMAAffinityMasks(current, candidate)
-	}
-	// Case 2
-	if current.NUMANodeAffinity.Count() == m.BestNonPreferredAffinityCount {
-		if candidate.NUMANodeAffinity.Count() != m.BestNonPreferredAffinityCount {
-			return current
-		}
-		return m.CompareNUMAAffinityMasks(current, candidate)
-	}
-	// Case 3a
-	if candidate.NUMANodeAffinity.Count() > m.BestNonPreferredAffinityCount {
-		return current
-	}
-	// Case 3b
-	if candidate.NUMANodeAffinity.Count() == m.BestNonPreferredAffinityCount {
-		return candidate
-	}
-
-	// Case 3ca
-	if candidate.NUMANodeAffinity.Count() > current.NUMANodeAffinity.Count() {
-		return candidate
-	}
-	// Case 3cb
-	if candidate.NUMANodeAffinity.Count() < current.NUMANodeAffinity.Count() {
-		return current
-	}
-
-	// Case 3cc
-	return m.CompareNUMAAffinityMasks(current, candidate)
-
-}
-
-func (m HintMerger) Merge() TopologyHint {
-	defaultAffinity := m.NUMAInfo.DefaultAffinityMask()
-
-	var bestHint *TopologyHint
-	iterateAllProviderTopologyHints(m.Hints, func(permutation []TopologyHint) {
-		// Get the NUMANodeAffinity from each hint in the permutation and see if any
-		// of them encode unpreferred allocations.
-		mergedHint := mergePermutation(defaultAffinity, permutation)
-
-		// Compare the current bestHint with the candidate mergedHint and
-		// update bestHint if appropriate.
-		bestHint = m.compare(bestHint, &mergedHint)
-	})
-
-	if bestHint == nil {
-		bestHint = &TopologyHint{defaultAffinity, false}
-	}
-
-	return *bestHint
-}
-
-// Iterate over all permutations of hints in 'allProviderHints [][]TopologyHint'.
-//
-// This procedure is implemented as a recursive function over the set of hints
-// in 'allproviderHints[i]'. It applies the function 'callback' to each
-// permutation as it is found. It is the equivalent of:
-//
-// for i := 0; i < len(providerHints[0]); i++
-//
-//	for j := 0; j < len(providerHints[1]); j++
-//		for k := 0; k < len(providerHints[2]); k++
-//			...
-//			for z := 0; z < len(providerHints[-1]); z++
-//				permutation := []TopologyHint{
-//					providerHints[0][i],
-//					providerHints[1][j],
-//					providerHints[2][k],
-//					...
-//					providerHints[-1][z]
-//				}
-//				callback(permutation)
-func iterateAllProviderTopologyHints(allProviderHints [][]TopologyHint, callback func([]TopologyHint)) {
-	// Internal helper function to accumulate the permutation before calling the callback.
-	var iterate func(i int, accum []TopologyHint)
-	iterate = func(i int, accum []TopologyHint) {
-		// Base case: we have looped through all providers and have a full permutation.
-		if i == len(allProviderHints) {
-			callback(accum)
-			return
-		}
-
-		// Loop through all hints for provider 'i', and recurse to build the
-		// permutation of this hint with all hints from providers 'i++'.
-		for j := range allProviderHints[i] {
-			iterate(i+1, append(accum, allProviderHints[i][j]))
-		}
-	}
-	iterate(0, []TopologyHint{})
-}
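The recursion above is a plain cross-product walk. A self-contained sketch with strings standing in for TopologyHints (two providers with 2 and 3 hints yield 2*3 = 6 permutations):

```go
package main

import "fmt"

func iterate(all [][]string, cb func([]string)) {
	var rec func(i int, acc []string)
	rec = func(i int, acc []string) {
		if i == len(all) { // full permutation assembled
			cb(acc)
			return
		}
		for _, h := range all[i] { // one hint from provider i, then recurse
			rec(i+1, append(acc, h))
		}
	}
	rec(0, nil)
}

func main() {
	providers := [][]string{{"a0", "a1"}, {"b0", "b1", "b2"}}
	iterate(providers, func(p []string) { fmt.Println(p) })
}
```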
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go
deleted file mode 100644
index 5cedad3da70..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_best_effort.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-type bestEffortPolicy struct {
-	// numaInfo represents list of NUMA Nodes available on the underlying machine and distances between them
-	numaInfo *NUMAInfo
-	opts     PolicyOptions
-}
-
-var _ Policy = &bestEffortPolicy{}
-
-// PolicyBestEffort policy name.
-const PolicyBestEffort string = "best-effort"
-
-// NewBestEffortPolicy returns best-effort policy.
-func NewBestEffortPolicy(numaInfo *NUMAInfo, opts PolicyOptions) Policy {
-	return &bestEffortPolicy{numaInfo: numaInfo, opts: opts}
-}
-
-func (p *bestEffortPolicy) Name() string {
-	return PolicyBestEffort
-}
-
-func (p *bestEffortPolicy) canAdmitPodResult(hint *TopologyHint) bool {
-	return true
-}
-
-func (p *bestEffortPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
-	filteredHints := filterProvidersHints(providersHints)
-	merger := NewHintMerger(p.numaInfo, filteredHints, p.Name(), p.opts)
-	bestHint := merger.Merge()
-	admit := p.canAdmitPodResult(&bestHint)
-	return bestHint, admit
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go
deleted file mode 100644
index 271f9dde79b..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_none.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-type nonePolicy struct{}
-
-var _ Policy = &nonePolicy{}
-
-// PolicyNone policy name.
-const PolicyNone string = "none"
-
-// NewNonePolicy returns none policy.
-func NewNonePolicy() Policy {
-	return &nonePolicy{}
-}
-
-func (p *nonePolicy) Name() string {
-	return PolicyNone
-}
-
-func (p *nonePolicy) canAdmitPodResult(hint *TopologyHint) bool {
-	return true
-}
-
-func (p *nonePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
-	return TopologyHint{}, p.canAdmitPodResult(nil)
-}
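best-effort, restricted and single-numa-node all run the same filter-merge pipeline and differ only in the admission predicate applied to the merged hint. A condensed stand-in (admitFn plays the role of canAdmitPodResult; the real merge is far richer):

```go
package main

import "fmt"

type hint struct{ preferred bool }

func merge(providerHints [][]hint, admitFn func(hint) bool) (hint, bool) {
	best := hint{preferred: true}
	for _, hs := range providerHints {
		for _, h := range hs {
			if !h.preferred { // any unpreferred input taints the merged hint
				best.preferred = false
			}
		}
	}
	return best, admitFn(best)
}

func main() {
	hints := [][]hint{{{preferred: true}}, {{preferred: false}}}

	bestEffort := func(h hint) bool { return true }        // always admit
	restricted := func(h hint) bool { return h.preferred } // admit only preferred

	_, a := merge(hints, bestEffort)
	_, b := merge(hints, restricted)
	fmt.Println(a, b) // true false
}
```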
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_options.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_options.go
deleted file mode 100644
index 5490aba112e..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_options.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-import (
-	"fmt"
-	"strconv"
-
-	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/klog/v2"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
-)
-
-const (
-	PreferClosestNUMANodes string = "prefer-closest-numa-nodes"
-	MaxAllowableNUMANodes  string = "max-allowable-numa-nodes"
-)
-
-var (
-	alphaOptions = sets.New[string]()
-	betaOptions  = sets.New[string](
-		MaxAllowableNUMANodes,
-	)
-	stableOptions = sets.New[string](
-		PreferClosestNUMANodes,
-	)
-)
-
-func CheckPolicyOptionAvailable(option string) error {
-	if !alphaOptions.Has(option) && !betaOptions.Has(option) && !stableOptions.Has(option) {
-		return fmt.Errorf("unknown Topology Manager Policy option: %q", option)
-	}
-
-	if alphaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManagerPolicyAlphaOptions) {
-		return fmt.Errorf("topology manager policy alpha-level options not enabled, but option %q provided", option)
-	}
-
-	if betaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManagerPolicyBetaOptions) {
-		return fmt.Errorf("topology manager policy beta-level options not enabled, but option %q provided", option)
-	}
-
-	return nil
-}
-
-type PolicyOptions struct {
-	PreferClosestNUMA     bool
-	MaxAllowableNUMANodes int
-}
-
-func NewPolicyOptions(policyOptions map[string]string) (PolicyOptions, error) {
-	opts := PolicyOptions{
-		// Set MaxAllowableNUMANodes to the default. This will be overwritten
-		// if the user has specified a policy option for MaxAllowableNUMANodes.
-		MaxAllowableNUMANodes: defaultMaxAllowableNUMANodes,
-	}
-
-	for name, value := range policyOptions {
-		if err := CheckPolicyOptionAvailable(name); err != nil {
-			return opts, err
-		}
-
-		switch name {
-		case PreferClosestNUMANodes:
-			optValue, err := strconv.ParseBool(value)
-			if err != nil {
-				return opts, fmt.Errorf("bad value for option %q: %w", name, err)
-			}
-			opts.PreferClosestNUMA = optValue
-		case MaxAllowableNUMANodes:
-			optValue, err := strconv.Atoi(value)
-			if err != nil {
-				return opts, fmt.Errorf("unable to convert policy option to integer %q: %w", name, err)
-			}
-
-			if optValue < defaultMaxAllowableNUMANodes {
-				return opts, fmt.Errorf("the minimum value of %q should not be less than %v", name, defaultMaxAllowableNUMANodes)
-			}
-
-			if optValue > defaultMaxAllowableNUMANodes {
-				klog.InfoS("WARNING: the value of max-allowable-numa-nodes is more than the default recommended value", "max-allowable-numa-nodes", optValue, "defaultMaxAllowableNUMANodes", defaultMaxAllowableNUMANodes)
-			}
-			opts.MaxAllowableNUMANodes = optValue
-		default:
-			// this should never be reached, we already detect unknown options,
-			// but we keep it as further safety.
-			return opts, fmt.Errorf("unsupported topologymanager option: %q (%s)", name, value)
-		}
-	}
-	return opts, nil
-}
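Assuming the beta feature gate for policy options is enabled, the kubelet-supplied map parses like this (a sketch against the API deleted above, not part of the patch):

```go
package topologymanager

func examplePolicyOptions() (PolicyOptions, error) {
	return NewPolicyOptions(map[string]string{
		PreferClosestNUMANodes: "true", // stable option, parsed with strconv.ParseBool
		MaxAllowableNUMANodes:  "16",   // beta option; values above the default of 8 log a warning
	})
}
```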
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go
deleted file mode 100644
index 88422b0087e..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_restricted.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-type restrictedPolicy struct {
-	bestEffortPolicy
-}
-
-var _ Policy = &restrictedPolicy{}
-
-// PolicyRestricted policy name.
-const PolicyRestricted string = "restricted"
-
-// NewRestrictedPolicy returns restricted policy.
-func NewRestrictedPolicy(numaInfo *NUMAInfo, opts PolicyOptions) Policy {
-	return &restrictedPolicy{bestEffortPolicy{numaInfo: numaInfo, opts: opts}}
-}
-
-func (p *restrictedPolicy) Name() string {
-	return PolicyRestricted
-}
-
-func (p *restrictedPolicy) canAdmitPodResult(hint *TopologyHint) bool {
-	return hint.Preferred
-}
-
-func (p *restrictedPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
-	filteredHints := filterProvidersHints(providersHints)
-	merger := NewHintMerger(p.numaInfo, filteredHints, p.Name(), p.opts)
-	bestHint := merger.Merge()
-	admit := p.canAdmitPodResult(&bestHint)
-	return bestHint, admit
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go
deleted file mode 100644
index d865aa9f18f..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_single_numa_node.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-type singleNumaNodePolicy struct {
-	// numaInfo represents list of NUMA Nodes available on the underlying machine and distances between them
-	numaInfo *NUMAInfo
-	opts     PolicyOptions
-}
-
-var _ Policy = &singleNumaNodePolicy{}
-
-// PolicySingleNumaNode policy name.
-const PolicySingleNumaNode string = "single-numa-node"
-
-// NewSingleNumaNodePolicy returns single-numa-node policy.
-func NewSingleNumaNodePolicy(numaInfo *NUMAInfo, opts PolicyOptions) Policy {
-	return &singleNumaNodePolicy{numaInfo: numaInfo, opts: opts}
-}
-
-func (p *singleNumaNodePolicy) Name() string {
-	return PolicySingleNumaNode
-}
-
-func (p *singleNumaNodePolicy) canAdmitPodResult(hint *TopologyHint) bool {
-	return hint.Preferred
-}
-
-// Return hints that have valid bitmasks with exactly one bit set.
-func filterSingleNumaHints(allResourcesHints [][]TopologyHint) [][]TopologyHint {
-	var filteredResourcesHints [][]TopologyHint
-	for _, oneResourceHints := range allResourcesHints {
-		var filtered []TopologyHint
-		for _, hint := range oneResourceHints {
-			if hint.NUMANodeAffinity == nil && hint.Preferred {
-				filtered = append(filtered, hint)
-			}
-			if hint.NUMANodeAffinity != nil && hint.NUMANodeAffinity.Count() == 1 && hint.Preferred {
-				filtered = append(filtered, hint)
-			}
-		}
-		filteredResourcesHints = append(filteredResourcesHints, filtered)
-	}
-	return filteredResourcesHints
-}
-
-func (p *singleNumaNodePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
-	filteredHints := filterProvidersHints(providersHints)
-	// Filter to only include don't cares and hints with a single NUMA node.
-	singleNumaHints := filterSingleNumaHints(filteredHints)
-
-	merger := NewHintMerger(p.numaInfo, singleNumaHints, p.Name(), p.opts)
-	bestHint := merger.Merge()
-
-	if bestHint.NUMANodeAffinity.IsEqual(p.numaInfo.DefaultAffinityMask()) {
-		bestHint = TopologyHint{nil, bestHint.Preferred}
-	}
-
-	admit := p.canAdmitPodResult(&bestHint)
-	return bestHint, admit
-}
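Which hints survive filterSingleNumaHints, sketched against the types deleted above (not part of the patch):

```go
package topologymanager

import "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"

func exampleSingleNumaFilter() [][]TopologyHint {
	node0, _ := bitmask.NewBitMask(0)
	node01, _ := bitmask.NewBitMask(0, 1)

	hints := [][]TopologyHint{{
		{NUMANodeAffinity: nil, Preferred: true},    // kept: preferred "don't care"
		{NUMANodeAffinity: node0, Preferred: true},  // kept: exactly one node, preferred
		{NUMANodeAffinity: node01, Preferred: true}, // dropped: spans two nodes
		{NUMANodeAffinity: node0, Preferred: false}, // dropped: not preferred
	}}

	return filterSingleNumaHints(hints)
}
```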
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope.go
deleted file mode 100644
index db3edd63e64..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope.go
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-import (
-	"sync"
-
-	"k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/kubelet/cm/admission"
-	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
-	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
-)
-
-const (
-	// containerTopologyScope specifies the TopologyManagerScope per container.
-	containerTopologyScope = "container"
-	// podTopologyScope specifies the TopologyManagerScope per pod.
-	podTopologyScope = "pod"
-	// noneTopologyScope specifies the TopologyManagerScope when topologyPolicyName is none.
-	noneTopologyScope = "none"
-)
-
-type podTopologyHints map[string]map[string]TopologyHint
-
-// Scope interface for Topology Manager
-type Scope interface {
-	Name() string
-	GetPolicy() Policy
-	Admit(pod *v1.Pod) lifecycle.PodAdmitResult
-	// AddHintProvider adds a hint provider to manager to indicate the hint provider
-	// wants to be consoluted with when making topology hints
-	AddHintProvider(h HintProvider)
-	// AddContainer adds pod to Manager for tracking
-	AddContainer(pod *v1.Pod, container *v1.Container, containerID string)
-	// RemoveContainer removes pod from Manager tracking
-	RemoveContainer(containerID string) error
-	// Store is the interface for storing pod topology hints
-	Store
-}
-
-type scope struct {
-	mutex sync.Mutex
-	name  string
-	// Mapping of a Pods mapping of Containers and their TopologyHints
-	// Indexed by PodUID to ContainerName
-	podTopologyHints podTopologyHints
-	// The list of components registered with the Manager
-	hintProviders []HintProvider
-	// Topology Manager Policy
-	policy Policy
-	// Mapping of (PodUid, ContainerName) to ContainerID for Adding/Removing Pods from PodTopologyHints mapping
-	podMap containermap.ContainerMap
-}
-
-func (s *scope) Name() string {
-	return s.name
-}
-
-func (s *scope) getTopologyHints(podUID string, containerName string) TopologyHint {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return s.podTopologyHints[podUID][containerName]
-}
-
-func (s *scope) setTopologyHints(podUID string, containerName string, th TopologyHint) {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-
-	if s.podTopologyHints[podUID] == nil {
-		s.podTopologyHints[podUID] = make(map[string]TopologyHint)
-	}
-	s.podTopologyHints[podUID][containerName] = th
-}
-
-func (s *scope) GetAffinity(podUID string, containerName string) TopologyHint {
-	return s.getTopologyHints(podUID, containerName)
-}
-
-func (s *scope) GetPolicy() Policy {
-	return s.policy
-}
-
-func (s *scope) AddHintProvider(h HintProvider) {
-	s.hintProviders = append(s.hintProviders, h)
-}
-
-// It would be better to implement this function in topologymanager instead of scope
-// but topologymanager do not track mapping anymore
-func (s *scope) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-
-	s.podMap.Add(string(pod.UID), container.Name, containerID)
-}
-
-// It would be better to implement this function in topologymanager instead of scope
-// but topologymanager do not track mapping anymore
-func (s *scope) RemoveContainer(containerID string) error {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-
-	klog.InfoS("RemoveContainer", "containerID", containerID)
-	// Get the podUID and containerName associated with the containerID to be removed and remove it
-	podUIDString, containerName, err := s.podMap.GetContainerRef(containerID)
-	if err != nil {
-		return nil
-	}
-	s.podMap.RemoveByContainerID(containerID)
-
-	// In cases where a container has been restarted, it's possible that the same podUID and
-	// containerName are already associated with a *different* containerID now. Only remove
-	// the TopologyHints associated with that podUID and containerName if this is not true
-	if _, err := s.podMap.GetContainerID(podUIDString, containerName); err != nil {
-		delete(s.podTopologyHints[podUIDString], containerName)
-		if len(s.podTopologyHints[podUIDString]) == 0 {
-			delete(s.podTopologyHints, podUIDString)
-		}
-	}
-
-	return nil
-}
-
-func (s *scope) admitPolicyNone(pod *v1.Pod) lifecycle.PodAdmitResult {
-	for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
-		err := s.allocateAlignedResources(pod, &container)
-		if err != nil {
-			return admission.GetPodAdmitResult(err)
-		}
-	}
-	return admission.GetPodAdmitResult(nil)
-}
-
-// It would be better to implement this function in topologymanager instead of scope
-// but topologymanager do not track providers anymore
-func (s *scope) allocateAlignedResources(pod *v1.Pod, container *v1.Container) error {
-	for _, provider := range s.hintProviders {
-		err := provider.Allocate(pod, container)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
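The bookkeeping contract of a scope, sketched against the deleted types (not part of the patch): hints are stored per (podUID, containerName), and podMap ties a containerID back to that pair so RemoveContainer can clean up.

```go
package topologymanager

import "k8s.io/kubernetes/pkg/kubelet/cm/containermap"

func exampleScopeBookkeeping() TopologyHint {
	s := &scope{
		podTopologyHints: podTopologyHints{},
		podMap:           containermap.NewContainerMap(),
	}

	// Admission stores the merged hint, AddContainer records the ID mapping.
	s.setTopologyHints("pod-uid", "ctr", TopologyHint{nil, true})
	s.podMap.Add("pod-uid", "ctr", "containerd://abc")

	// Hint providers read the affinity back through the Store interface.
	hint := s.GetAffinity("pod-uid", "ctr")

	// Drops both mappings, unless the (podUID, containerName) pair already
	// points at a newer containerID because the container restarted.
	_ = s.RemoveContainer("containerd://abc")

	return hint
}
```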
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go
deleted file mode 100644
index 7c06c090cc6..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-import (
-	"k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/kubelet/cm/admission"
-	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
-	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
-	"k8s.io/kubernetes/pkg/kubelet/metrics"
-)
-
-type containerScope struct {
-	scope
-}
-
-// Ensure containerScope implements Scope interface
-var _ Scope = &containerScope{}
-
-// NewContainerScope returns a container scope.
-func NewContainerScope(policy Policy) Scope {
-	return &containerScope{
-		scope{
-			name:             containerTopologyScope,
-			podTopologyHints: podTopologyHints{},
-			policy:           policy,
-			podMap:           containermap.NewContainerMap(),
-		},
-	}
-}
-
-func (s *containerScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
-	for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
-		bestHint, admit := s.calculateAffinity(pod, &container)
-		klog.InfoS("Best TopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
-
-		if !admit {
-			if IsAlignmentGuaranteed(s.policy) {
-				metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Inc()
-			}
-			metrics.TopologyManagerAdmissionErrorsTotal.Inc()
-			return admission.GetPodAdmitResult(&TopologyAffinityError{})
-		}
-		klog.InfoS("Topology Affinity", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
-		s.setTopologyHints(string(pod.UID), container.Name, bestHint)
-
-		err := s.allocateAlignedResources(pod, &container)
-		if err != nil {
-			metrics.TopologyManagerAdmissionErrorsTotal.Inc()
-			return admission.GetPodAdmitResult(err)
-		}
-
-		if IsAlignmentGuaranteed(s.policy) {
-			klog.V(4).InfoS("Resource alignment at container scope guaranteed", "pod", klog.KObj(pod))
-			metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Inc()
-		}
-	}
-	return admission.GetPodAdmitResult(nil)
-}
-
-func (s *containerScope) accumulateProvidersHints(pod *v1.Pod, container *v1.Container) []map[string][]TopologyHint {
-	var providersHints []map[string][]TopologyHint
-
-	for _, provider := range s.hintProviders {
-		// Get the TopologyHints for a Container from a provider.
-		hints := provider.GetTopologyHints(pod, container)
-		providersHints = append(providersHints, hints)
-		klog.InfoS("TopologyHints", "hints", hints, "pod", klog.KObj(pod), "containerName", container.Name)
-	}
-	return providersHints
-}
-
-func (s *containerScope) calculateAffinity(pod *v1.Pod, container *v1.Container) (TopologyHint, bool) {
-	providersHints := s.accumulateProvidersHints(pod, container)
-	bestHint, admit := s.policy.Merge(providersHints)
-	klog.InfoS("ContainerTopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
-	return bestHint, admit
-}
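The minimal shape of a HintProvider that this scope consults; a hypothetical device-plugin-style provider might answer like this (a sketch, not part of the patch):

```go
package topologymanager

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)

type fakeProvider struct{}

var _ HintProvider = &fakeProvider{}

func (f *fakeProvider) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint {
	node0, _ := bitmask.NewBitMask(0)
	// "vendor.com/device" is a made-up resource name for illustration.
	return map[string][]TopologyHint{
		"vendor.com/device": {{NUMANodeAffinity: node0, Preferred: true}},
	}
}

func (f *fakeProvider) GetPodTopologyHints(pod *v1.Pod) map[string][]TopologyHint {
	return f.GetTopologyHints(pod, nil)
}

func (f *fakeProvider) Allocate(pod *v1.Pod, container *v1.Container) error {
	return nil // would pin the device to the affinity stored in the scope
}
```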
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_none.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_none.go
deleted file mode 100644
index c82b19e1f9c..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_none.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-Copyright 2023 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-import (
-	"k8s.io/api/core/v1"
-	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
-	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
-)
-
-type noneScope struct {
-	scope
-}
-
-// Ensure noneScope implements Scope interface
-var _ Scope = &noneScope{}
-
-// NewNoneScope returns a none scope.
-func NewNoneScope() Scope {
-	return &noneScope{
-		scope{
-			name:             noneTopologyScope,
-			podTopologyHints: podTopologyHints{},
-			policy:           NewNonePolicy(),
-			podMap:           containermap.NewContainerMap(),
-		},
-	}
-}
-
-func (s *noneScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
-	return s.admitPolicyNone(pod)
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go
deleted file mode 100644
index bcb421d61e4..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-import (
-	"k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/kubelet/cm/admission"
-	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
-	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
-	"k8s.io/kubernetes/pkg/kubelet/metrics"
-)
-
-type podScope struct {
-	scope
-}
-
-// Ensure podScope implements Scope interface
-var _ Scope = &podScope{}
-
-// NewPodScope returns a pod scope.
-func NewPodScope(policy Policy) Scope {
-	return &podScope{
-		scope{
-			name:             podTopologyScope,
-			podTopologyHints: podTopologyHints{},
-			policy:           policy,
-			podMap:           containermap.NewContainerMap(),
-		},
-	}
-}
-
-func (s *podScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
-	bestHint, admit := s.calculateAffinity(pod)
-	klog.InfoS("Best TopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod))
-	if !admit {
-		if IsAlignmentGuaranteed(s.policy) {
-			// increment only if we know we allocate aligned resources.
-			metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Inc()
-		}
-		metrics.TopologyManagerAdmissionErrorsTotal.Inc()
-		return admission.GetPodAdmitResult(&TopologyAffinityError{})
-	}
-
-	for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
-		klog.InfoS("Topology Affinity", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
-		s.setTopologyHints(string(pod.UID), container.Name, bestHint)
-
-		err := s.allocateAlignedResources(pod, &container)
-		if err != nil {
-			metrics.TopologyManagerAdmissionErrorsTotal.Inc()
-			return admission.GetPodAdmitResult(err)
-		}
-	}
-	if IsAlignmentGuaranteed(s.policy) {
-		// increment only if we know we allocate aligned resources.
-		klog.V(4).InfoS("Resource alignment at pod scope guaranteed", "pod", klog.KObj(pod))
-		metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Inc()
-	}
-	return admission.GetPodAdmitResult(nil)
-}
-
-func (s *podScope) accumulateProvidersHints(pod *v1.Pod) []map[string][]TopologyHint {
-	var providersHints []map[string][]TopologyHint
-
-	for _, provider := range s.hintProviders {
-		// Get the TopologyHints for a Pod from a provider.
-		hints := provider.GetPodTopologyHints(pod)
-		providersHints = append(providersHints, hints)
-		klog.InfoS("TopologyHints", "hints", hints, "pod", klog.KObj(pod))
-	}
-	return providersHints
-}
-
-func (s *podScope) calculateAffinity(pod *v1.Pod) (TopologyHint, bool) {
-	providersHints := s.accumulateProvidersHints(pod)
-	bestHint, admit := s.policy.Merge(providersHints)
-	klog.InfoS("PodTopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod))
-	return bestHint, admit
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go
deleted file mode 100644
index ccaba099f80..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package topologymanager
-
-import (
-	"fmt"
-	"time"
-
-	cadvisorapi "github.com/google/cadvisor/info/v1"
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
-	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
-	"k8s.io/kubernetes/pkg/kubelet/metrics"
-)
-
-const (
-	// defaultMaxAllowableNUMANodes specifies the maximum number of NUMA Nodes that
-	// the TopologyManager supports on the underlying machine.
-	//
-	// At present, having more than this number of NUMA Nodes will result in a
-	// state explosion when trying to enumerate possible NUMAAffinity masks and
-	// generate hints for them. As such, if more NUMA Nodes than this are
-	// present on a machine and the TopologyManager is enabled, an error will
-	// be returned and the TopologyManager will not be loaded.
-	defaultMaxAllowableNUMANodes = 8
-	// ErrorTopologyAffinity represents the type for a TopologyAffinityError
-	ErrorTopologyAffinity = "TopologyAffinityError"
-)
-
-// TopologyAffinityError represents an resource alignment error
-type TopologyAffinityError struct{}
-
-func (e TopologyAffinityError) Error() string {
-	return "Resources cannot be allocated with Topology locality"
-}
-
-func (e TopologyAffinityError) Type() string {
-	return ErrorTopologyAffinity
-}
-
-// Manager interface provides methods for Kubelet to manage pod topology hints
-type Manager interface {
-	// PodAdmitHandler is implemented by Manager
-	lifecycle.PodAdmitHandler
-	// AddHintProvider adds a hint provider to manager to indicate the hint provider
-	// wants to be consulted with when making topology hints
-	AddHintProvider(HintProvider)
-	// AddContainer adds pod to Manager for tracking
-	AddContainer(pod *v1.Pod, container *v1.Container, containerID string)
-	// RemoveContainer removes pod from Manager tracking
-	RemoveContainer(containerID string) error
-	// Store is the interface for storing pod topology hints
-	Store
-}
-
-type manager struct {
-	//Topology Manager Scope
-	scope Scope
-}
-
-// HintProvider is an interface for components that want to collaborate to
-// achieve globally optimal concrete resource alignment with respect to
-// NUMA locality.
-type HintProvider interface {
-	// GetTopologyHints returns a map of resource names to a list of possible
-	// concrete resource allocations in terms of NUMA locality hints. Each hint
-	// is optionally marked "preferred" and indicates the set of NUMA nodes
-	// involved in the hypothetical allocation. The topology manager calls
-	// this function for each hint provider, and merges the hints to produce
-	// a consensus "best" hint. The hint providers may subsequently query the
-	// topology manager to influence actual resource assignment.
-	GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint
-	// GetPodTopologyHints returns a map of resource names to a list of possible
-	// concrete resource allocations per Pod in terms of NUMA locality hints.
-	GetPodTopologyHints(pod *v1.Pod) map[string][]TopologyHint
-	// Allocate triggers resource allocation to occur on the HintProvider after
-	// all hints have been gathered and the aggregated Hint is available via a
-	// call to Store.GetAffinity().
-	Allocate(pod *v1.Pod, container *v1.Container) error
-}
-
-// Store interface is to allow Hint Providers to retrieve pod affinity
-type Store interface {
-	GetAffinity(podUID string, containerName string) TopologyHint
-	GetPolicy() Policy
-}
-
-// TopologyHint is a struct containing the NUMANodeAffinity for a Container
-type TopologyHint struct {
-	NUMANodeAffinity bitmask.BitMask
-	// Preferred is set to true when the NUMANodeAffinity encodes a preferred
-	// allocation for the Container. It is set to false otherwise.
-	Preferred bool
-}
-
-// IsEqual checks if TopologyHint are equal
-func (th *TopologyHint) IsEqual(topologyHint TopologyHint) bool {
-	if th.Preferred == topologyHint.Preferred {
-		if th.NUMANodeAffinity == nil || topologyHint.NUMANodeAffinity == nil {
-			return th.NUMANodeAffinity == topologyHint.NUMANodeAffinity
-		}
-		return th.NUMANodeAffinity.IsEqual(topologyHint.NUMANodeAffinity)
-	}
-	return false
-}
-
-// LessThan checks if TopologyHint `a` is less than TopologyHint `b`
-// this means that either `a` is a preferred hint and `b` is not
-// or `a` NUMANodeAffinity attribute is narrower than `b` NUMANodeAffinity attribute.
-func (th *TopologyHint) LessThan(other TopologyHint) bool {
-	if th.Preferred != other.Preferred {
-		return th.Preferred
-	}
-	return th.NUMANodeAffinity.IsNarrowerThan(other.NUMANodeAffinity)
-}
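Hint ordering in practice: a preferred hint always sorts before a non-preferred one, and ties fall back to the narrower mask. A sketch against the deleted types (not part of the patch):

```go
package topologymanager

import "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"

func exampleHintOrdering() (bool, bool) {
	node0, _ := bitmask.NewBitMask(0)
	node01, _ := bitmask.NewBitMask(0, 1)

	preferred := TopologyHint{NUMANodeAffinity: node01, Preferred: true}
	narrow := TopologyHint{NUMANodeAffinity: node0, Preferred: false}

	// true: preferred beats non-preferred regardless of mask width.
	first := preferred.LessThan(narrow)
	// false: the Preferred flags match but the masks differ.
	second := preferred.IsEqual(TopologyHint{NUMANodeAffinity: node0, Preferred: true})

	return first, second
}
```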
-
-var _ Manager = &manager{}
-
-// NewManager creates a new TopologyManager based on provided policy and scope
-func NewManager(topology []cadvisorapi.Node, topologyPolicyName string, topologyScopeName string, topologyPolicyOptions map[string]string) (Manager, error) {
-	// When policy is none, the scope is not relevant, so we can short circuit here.
-	if topologyPolicyName == PolicyNone {
-		klog.InfoS("Creating topology manager with none policy")
-		return &manager{scope: NewNoneScope()}, nil
-	}
-
-	opts, err := NewPolicyOptions(topologyPolicyOptions)
-	if err != nil {
-		return nil, err
-	}
-
-	klog.InfoS("Creating topology manager with policy per scope", "topologyPolicyName", topologyPolicyName, "topologyScopeName", topologyScopeName, "topologyPolicyOptions", opts)
-
-	numaInfo, err := NewNUMAInfo(topology, opts)
-	if err != nil {
-		return nil, fmt.Errorf("cannot discover NUMA topology: %w", err)
-	}
-
-	if topologyPolicyName != PolicyNone && len(numaInfo.Nodes) > opts.MaxAllowableNUMANodes {
-		return nil, fmt.Errorf("unsupported on machines with more than %v NUMA Nodes", opts.MaxAllowableNUMANodes)
-	}
-
-	var policy Policy
-	switch topologyPolicyName {
-
-	case PolicyBestEffort:
-		policy = NewBestEffortPolicy(numaInfo, opts)
-
-	case PolicyRestricted:
-		policy = NewRestrictedPolicy(numaInfo, opts)
-
-	case PolicySingleNumaNode:
-		policy = NewSingleNumaNodePolicy(numaInfo, opts)
-
-	default:
-		return nil, fmt.Errorf("unknown policy: \"%s\"", topologyPolicyName)
-	}
-
-	var scope Scope
-	switch topologyScopeName {
-
-	case containerTopologyScope:
-		scope = NewContainerScope(policy)
-
-	case podTopologyScope:
-		scope = NewPodScope(policy)
-
-	default:
-		return nil, fmt.Errorf("unknown scope: \"%s\"", topologyScopeName)
-	}
-
-	manager := &manager{
-		scope: scope,
-	}
-
-	manager.initializeMetrics()
-
-	return manager, nil
-}
-
-func (m *manager) initializeMetrics() {
-	// ensure the values exist
-	metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Add(0)
-	metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Add(0)
-	metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Add(0)
-	metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Add(0)
-}
-
-func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint {
-	return m.scope.GetAffinity(podUID, containerName)
-}
-
-func (m *manager) GetPolicy() Policy {
-	return m.scope.GetPolicy()
-}
-
-func (m *manager) AddHintProvider(h HintProvider) {
-	m.scope.AddHintProvider(h)
-}
-
-func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
-	m.scope.AddContainer(pod, container, containerID)
-}
-
-func (m *manager) RemoveContainer(containerID string) error {
-	return m.scope.RemoveContainer(containerID)
-}
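Wiring the manager up the way the kubelet's container manager does, with hypothetical inputs (a sketch, not part of the patch):

```go
package topologymanager

import cadvisorapi "github.com/google/cadvisor/info/v1"

func exampleNewManager(topology []cadvisorapi.Node, cpuManager HintProvider) (Manager, error) {
	mgr, err := NewManager(
		topology, // NUMA nodes as reported by cAdvisor
		PolicyRestricted,
		podTopologyScope,
		map[string]string{PreferClosestNUMANodes: "true"},
	)
	if err != nil {
		// unknown policy or scope, bad options, or too many NUMA nodes
		return nil, err
	}
	mgr.AddHintProvider(cpuManager) // e.g. the CPU, memory or device manager
	return mgr, nil
}
```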
-
-func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
-	klog.V(4).InfoS("Topology manager admission check", "pod", klog.KObj(attrs.Pod))
-	metrics.TopologyManagerAdmissionRequestsTotal.Inc()
-
-	startTime := time.Now()
-	podAdmitResult := m.scope.Admit(attrs.Pod)
-	metrics.TopologyManagerAdmissionDuration.Observe(float64(time.Since(startTime).Milliseconds()))
-
-	klog.V(4).InfoS("Pod Admit Result", "Message", podAdmitResult.Message, "pod", klog.KObj(attrs.Pod))
-	return podAdmitResult
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go
deleted file mode 100644
index e6338d3af81..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cm
-
-import (
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/utils/cpuset"
-)
-
-// ResourceConfig holds information about all the supported cgroup resource parameters.
-type ResourceConfig struct {
-	// Memory limit (in bytes).
-	Memory *int64
-	// CPU set (number of CPUs the cgroup has access to).
-	CPUSet cpuset.CPUSet
-	// CPU shares (relative weight vs. other containers).
-	CPUShares *uint64
-	// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
-	CPUQuota *int64
-	// CPU quota period.
-	CPUPeriod *uint64
-	// HugePageLimit map from page size (in bytes) to limit (in bytes)
-	HugePageLimit map[int64]int64
-	// Maximum number of pids
-	PidsLimit *int64
-	// Unified for cgroup v2
-	Unified map[string]string
-}
-
-// CgroupName is the abstract name of a cgroup prior to any driver specific conversion.
-// It is specified as a list of strings from its individual components, such as:
-// {"kubepods", "burstable", "pod1234-abcd-5678-efgh"}
-type CgroupName []string
-
-// CgroupConfig holds the cgroup configuration information.
-// This is common object which is used to specify
-// cgroup information to both systemd and raw cgroup fs
-// implementation of the Cgroup Manager interface.
-type CgroupConfig struct {
-	// Fully qualified name prior to any driver specific conversions.
-	Name CgroupName
-	// ResourceParameters contains various cgroups settings to apply.
-	ResourceParameters *ResourceConfig
-}
-
-// CgroupManager allows for cgroup management.
-// Supports Cgroup Creation ,Deletion and Updates.
-type CgroupManager interface {
-	// Create creates and applies the cgroup configurations on the cgroup.
-	// It just creates the leaf cgroups.
-	// It expects the parent cgroup to already exist.
-	Create(*CgroupConfig) error
-	// Destroy the cgroup.
-	Destroy(*CgroupConfig) error
-	// Update cgroup configuration.
-	Update(*CgroupConfig) error
-	// Validate checks if the cgroup is valid
-	Validate(name CgroupName) error
-	// Exists checks if the cgroup already exists
-	Exists(name CgroupName) bool
-	// Name returns the literal cgroupfs name on the host after any driver specific conversions.
-	// We would expect systemd implementation to make appropriate name conversion.
-	// For example, if we pass {"foo", "bar"}
-	// then systemd should convert the name to something like
-	// foo.slice/foo-bar.slice
-	Name(name CgroupName) string
-	// CgroupName converts the literal cgroupfs name on the host to an internal identifier.
-	CgroupName(name string) CgroupName
-	// Pids scans through all subsystems to find pids associated with specified cgroup.
-	Pids(name CgroupName) []int
-	// ReduceCPULimits reduces the CPU CFS values to the minimum amount of shares.
-	ReduceCPULimits(cgroupName CgroupName) error
-	// MemoryUsage returns current memory usage of the specified cgroup, as read from the cgroupfs.
-	MemoryUsage(name CgroupName) (int64, error)
-	// Get the resource config values applied to the cgroup for specified resource type
-	GetCgroupConfig(name CgroupName, resource v1.ResourceName) (*ResourceConfig, error)
-	// Set resource config for the specified resource type on the cgroup
-	SetCgroupConfig(name CgroupName, resourceConfig *ResourceConfig) error
-	// Version of the cgroup implementation on the host
-	Version() int
-}
-
-// QOSContainersInfo stores the names of containers per qos
-type QOSContainersInfo struct {
-	Guaranteed CgroupName
-	BestEffort CgroupName
-	Burstable  CgroupName
-}
-
-// PodContainerManager stores and manages pod level containers
-// The Pod workers interact with the PodContainerManager to create and destroy
-// containers for the pod.
-type PodContainerManager interface {
-	// GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host.
-	GetPodContainerName(*v1.Pod) (CgroupName, string)
-
-	// EnsureExists takes a pod as argument and makes sure that
-	// pod cgroup exists if qos cgroup hierarchy flag is enabled.
-	// If the pod cgroup doesn't already exist this method creates it.
-	EnsureExists(*v1.Pod) error
-
-	// Exists returns true if the pod cgroup exists.
-	Exists(*v1.Pod) bool
-
-	// Destroy takes a pod Cgroup name as argument and destroys the pod's container.
-	Destroy(name CgroupName) error
-
-	// ReduceCPULimits reduces the CPU CFS values to the minimum amount of shares.
-	ReduceCPULimits(name CgroupName) error
-
-	// GetAllPodsFromCgroups enumerates the set of pod uids to their associated cgroup based on state of cgroupfs system.
-	GetAllPodsFromCgroups() (map[types.UID]CgroupName, error)
-
-	// IsPodCgroup returns true if the literal cgroupfs name corresponds to a pod
-	IsPodCgroup(cgroupfs string) (bool, types.UID)
-
-	// Get value of memory usage for the pod Cgroup
-	GetPodCgroupMemoryUsage(pod *v1.Pod) (uint64, error)
-
-	// Get the resource config values applied to the pod cgroup for specified resource type
-	GetPodCgroupConfig(pod *v1.Pod, resource v1.ResourceName) (*ResourceConfig, error)
-
-	// Set resource config values for the specified resource type on the pod cgroup
-	SetPodCgroupConfig(pod *v1.Pod, resourceConfig *ResourceConfig) error
-}
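What a caller hands to CgroupManager.Create; the driver joins the CgroupName components into the literal cgroupfs or systemd name. A sketch against the deleted types (not part of the patch):

```go
package cm

func exampleCgroupConfig(mgr CgroupManager) error {
	memLimit := int64(512 * 1024 * 1024) // 512Mi, in bytes
	shares := uint64(256)                // relative CPU weight

	return mgr.Create(&CgroupConfig{
		Name: CgroupName{"kubepods", "burstable", "pod1234-abcd-5678-efgh"},
		ResourceParameters: &ResourceConfig{
			Memory:    &memLimit,
			CPUShares: &shares,
		},
	})
}
```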
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_linux.go
deleted file mode 100644
index c63c0b41051..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_linux.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-	"path/filepath"
-
-	libcontainerutils "k8s.io/kubernetes/third_party/forked/libcontainer/utils"
-
-	libcontainercgroups "github.com/opencontainers/cgroups"
-)
-
-const (
-	// CgroupRoot is the base path where cgroups are mounted
-	CgroupRoot = "/sys/fs/cgroup"
-)
-
-// GetPids gets pids of the desired cgroup
-// Forked from opencontainers/runc/libcontainer/cgroup/fs.Manager.GetPids()
-func GetPids(cgroupPath string) ([]int, error) {
-	dir := ""
-
-	if libcontainercgroups.IsCgroup2UnifiedMode() {
-		path, err := filepath.Rel("/", cgroupPath)
-		if err != nil {
-			return nil, err
-		}
-		dir = filepath.Join(CgroupRoot, path)
-	} else {
-		var err error
-		dir, err = getCgroupV1Path(cgroupPath)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return libcontainercgroups.GetPids(dir)
-}
-
-// getCgroupV1Path gets the file path to the "devices" subsystem of the desired cgroup.
-// cgroupPath is the path in the cgroup hierarchy.
-func getCgroupV1Path(cgroupPath string) (string, error) {
-	cgroupPath = libcontainerutils.CleanPath(cgroupPath)
-
-	mnt, root, err := libcontainercgroups.FindCgroupMountpointAndRoot(cgroupPath, "devices")
-	// If we didn't mount the subsystem, there is no point we make the path.
-	if err != nil {
-		return "", err
-	}
-
-	// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
-	if filepath.IsAbs(cgroupPath) {
-		// Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
-		return filepath.Join(root, mnt, cgroupPath), nil
-	}
-
-	parentPath, err := getCgroupV1ParentPath(mnt, root)
-	if err != nil {
-		return "", err
-	}
-
-	return filepath.Join(parentPath, cgroupPath), nil
-}
-
-// getCgroupV1ParentPath gets the parent filepath to this cgroup, for resolving relative cgroup paths.
-func getCgroupV1ParentPath(mountpoint, root string) (string, error) {
-	// Use GetThisCgroupDir instead of GetInitCgroupDir, because the creating
-	// process could in container and shared pid namespace with host, and
-	// /proc/1/cgroup could point to whole other world of cgroups.
-	initPath, err := libcontainercgroups.GetOwnCgroup("devices")
-	if err != nil {
-		return "", err
-	}
-	// This is needed for nested containers, because in /proc/self/cgroup we
-	// see paths from host, which don't exist in container.
-	relDir, err := filepath.Rel(root, initPath)
-	if err != nil {
-		return "", err
-	}
-	return filepath.Join(mountpoint, relDir), nil
-}
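Resolving the pids of a pod cgroup with the helper above (a sketch with a hypothetical path, not part of the patch):

```go
package util

func examplePidLookup() ([]int, error) {
	// On cgroup v2 this resolves under /sys/fs/cgroup directly; on v1 it is
	// resolved against the mount of the "devices" subsystem.
	return GetPids("/kubepods/burstable/pod1234-abcd-5678-efgh")
}
```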
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_unsupported.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_unsupported.go
deleted file mode 100644
index b655f41388c..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_unsupported.go
+++ /dev/null
@@ -1,25 +0,0 @@
-//go:build !linux
-// +build !linux
-
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-// GetPids gets pids of the desired cgroup
-func GetPids(cgroupPath string) ([]int, error) {
-	return nil, nil
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go
deleted file mode 100644
index b67f6c34fec..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-import (
-	"time"
-
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/klog/v2"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-)
-
-// WaitForAPIServerSyncPeriod is the period between checks for the node list/watch initial sync
-const WaitForAPIServerSyncPeriod = 1 * time.Second
-
-// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
-func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, nodeHasSynced func() bool, updates chan<- interface{}) {
-	lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector("spec.nodeName", string(nodeName)))
-
-	// The Reflector responsible for watching pods at the apiserver should be run only after
-	// the node sync with the apiserver has completed.
-	klog.InfoS("Waiting for node sync before watching apiserver pods")
-	go func() {
-		for {
-			if nodeHasSynced() {
-				klog.V(4).InfoS("node sync completed")
-				break
-			}
-			time.Sleep(WaitForAPIServerSyncPeriod)
-			klog.V(4).InfoS("node sync has not completed yet")
-		}
-		klog.InfoS("Watching apiserver")
-		newSourceApiserverFromLW(lw, updates)
-	}()
-}
-
-// newSourceApiserverFromLW holds creates a config source that watches and pulls from the apiserver.
-func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
-	send := func(objs []interface{}) {
-		var pods []*v1.Pod
-		for _, o := range objs {
-			pods = append(pods, o.(*v1.Pod))
-		}
-		updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.ApiserverSource}
-	}
-	r := cache.NewReflector(lw, &v1.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0)
-	go r.Run(wait.NeverStop)
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go
deleted file mode 100644
index a73d6372a47..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-import (
-	"crypto/md5"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"strings"
-
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
-	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/apis/core/helper"
-
-	// TODO: remove this import if
-	// api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String() is changed
-	// to "v1"?
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	// Ensure that core apis are installed
-	_ "k8s.io/kubernetes/pkg/apis/core/install"
-	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
-	"k8s.io/kubernetes/pkg/apis/core/validation"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	"k8s.io/kubernetes/pkg/util/hash"
-
-	"k8s.io/klog/v2"
-)
-
-const (
-	maxConfigLength = 10 * 1 << 20 // 10MB
-)
-
-// Generate a pod name that is unique among nodes by appending the nodeName.
-func generatePodName(name string, nodeName types.NodeName) string {
-	return fmt.Sprintf("%s-%s", name, strings.ToLower(string(nodeName)))
-}
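The node-name suffix is what keeps mirror-pod names unique across nodes (a sketch, not part of the patch):

```go
package config

import "k8s.io/apimachinery/pkg/types"

func exampleStaticPodName() string {
	// The node name is lowercased before being appended.
	return generatePodName("static-web", types.NodeName("Node-1")) // "static-web-node-1"
}
```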
-
-func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.NodeName) error {
-	if len(pod.UID) == 0 {
-		hasher := md5.New()
-		hash.DeepHashObject(hasher, pod)
-		// DeepHashObject resets the hash, so we should write the pod source
-		// information AFTER it.
-		if isFile {
-			fmt.Fprintf(hasher, "host:%s", nodeName)
-			fmt.Fprintf(hasher, "file:%s", source)
-		} else {
-			fmt.Fprintf(hasher, "url:%s", source)
-		}
-		pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:]))
-		klog.V(5).InfoS("Generated UID", "pod", klog.KObj(pod), "podUID", pod.UID, "source", source)
-	}
-
-	pod.Name = generatePodName(pod.Name, nodeName)
-	klog.V(5).InfoS("Generated pod name", "pod", klog.KObj(pod), "podUID", pod.UID, "source", source)
-
-	if pod.Namespace == "" {
-		pod.Namespace = metav1.NamespaceDefault
-	}
-	klog.V(5).InfoS("Set namespace for pod", "pod", klog.KObj(pod), "source", source)
-
-	// Set the Host field to indicate this pod is scheduled on the current node.
-	pod.Spec.NodeName = string(nodeName)
-
-	if pod.Annotations == nil {
-		pod.Annotations = make(map[string]string)
-	}
-	// The generated UID is the hash of the file.
-	pod.Annotations[kubetypes.ConfigHashAnnotationKey] = string(pod.UID)
-
-	if isFile {
-		// Applying the default Taint tolerations to static pods,
-		// so they are not evicted when there are node problems.
-		helper.AddOrUpdateTolerationInPod(pod, &api.Toleration{
-			Operator: "Exists",
-			Effect:   api.TaintEffectNoExecute,
-		})
-	}
-
-	// Set the default status to pending.
-	pod.Status.Phase = api.PodPending
-	return nil
-}
-
-type defaultFunc func(pod *api.Pod) error
-
-// A static pod tried to use a ClusterTrustBundle projected volume source.
-var ErrStaticPodTriedToUseClusterTrustBundle = errors.New("static pods may not use ClusterTrustBundle projected volume sources")
-
-// A static pod tried to use a resource claim.
-var ErrStaticPodTriedToUseResourceClaims = errors.New("static pods may not use ResourceClaims")
-
-// tryDecodeSinglePod takes data and tries to extract valid Pod config information from it.
-func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v1.Pod, err error) {
-	// JSON is valid YAML, so this should work for everything.
-	json, err := utilyaml.ToJSON(data)
-	if err != nil {
-		return false, nil, err
-	}
-	obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), json)
-	if err != nil {
-		return false, pod, err
-	}
-
-	newPod, ok := obj.(*api.Pod)
-	// Check whether the object could be converted to single pod.
-	if !ok {
-		return false, pod, fmt.Errorf("invalid pod: %#v", obj)
-	}
-
-	if newPod.Name == "" {
-		return true, pod, fmt.Errorf("invalid pod: name is needed for the pod")
-	}
-
-	// Apply default values and validate the pod.
-	if err = defaultFn(newPod); err != nil {
-		return true, pod, err
-	}
-	if errs := validation.ValidatePodCreate(newPod, validation.PodValidationOptions{}); len(errs) > 0 {
-		return true, pod, fmt.Errorf("invalid pod: %v", errs)
-	}
-	v1Pod := &v1.Pod{}
-	if err := k8s_api_v1.Convert_core_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil {
-		klog.ErrorS(err, "Pod failed to convert to v1", "pod", klog.KObj(newPod))
-		return true, nil, err
-	}
-
-	for _, v := range v1Pod.Spec.Volumes {
-		if v.Projected == nil {
-			continue
-		}
-
-		for _, s := range v.Projected.Sources {
-			if s.ClusterTrustBundle != nil {
-				return true, nil, ErrStaticPodTriedToUseClusterTrustBundle
-			}
-		}
-	}
-	if len(v1Pod.Spec.ResourceClaims) > 0 {
-		return true, nil, ErrStaticPodTriedToUseResourceClaims
-	}
-
-	return true, v1Pod, nil
-}
-
-func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods v1.PodList, err error) {
-	obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data)
-	if err != nil {
-		return false, pods, err
-	}
-
-	newPods, ok := obj.(*api.PodList)
-	// Check whether the object could be converted to list of pods.
-	if !ok {
-		err = fmt.Errorf("invalid pods list: %#v", obj)
-		return false, pods, err
-	}
-
-	// Apply default values and validate pods.
- for i := range newPods.Items { - newPod := &newPods.Items[i] - if newPod.Name == "" { - return true, pods, fmt.Errorf("invalid pod: name is needed for the pod") - } - if err = defaultFn(newPod); err != nil { - return true, pods, err - } - if errs := validation.ValidatePodCreate(newPod, validation.PodValidationOptions{}); len(errs) > 0 { - err = fmt.Errorf("invalid pod: %v", errs) - return true, pods, err - } - } - v1Pods := &v1.PodList{} - if err := k8s_api_v1.Convert_core_PodList_To_v1_PodList(newPods, v1Pods, nil); err != nil { - return true, pods, err - } - return true, *v1Pods, err -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go deleted file mode 100644 index 10a42c82322..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go +++ /dev/null @@ -1,500 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "context" - "fmt" - "reflect" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/events" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - "k8s.io/kubernetes/pkg/kubelet/util/format" -) - -// PodConfigNotificationMode describes how changes are sent to the update channel. -type PodConfigNotificationMode int - -const ( - // PodConfigNotificationUnknown is the default value for - // PodConfigNotificationMode when uninitialized. - PodConfigNotificationUnknown PodConfigNotificationMode = iota - // PodConfigNotificationSnapshot delivers the full configuration as a SET whenever - // any change occurs. - PodConfigNotificationSnapshot - // PodConfigNotificationSnapshotAndUpdates delivers an UPDATE and DELETE message whenever pods are - // changed, and a SET message if there are any additions or removals. - PodConfigNotificationSnapshotAndUpdates - // PodConfigNotificationIncremental delivers ADD, UPDATE, DELETE, REMOVE, RECONCILE to the update channel. - PodConfigNotificationIncremental -) - -type podStartupSLIObserver interface { - ObservedPodOnWatch(pod *v1.Pod, when time.Time) -} - -// PodConfig is a configuration mux that merges many sources of pod configuration into a single -// consistent structure, and then delivers incremental change notifications to listeners -// in order. -type PodConfig struct { - pods *podStorage - mux *mux - - // the channel of denormalized changes passed to listeners - updates chan kubetypes.PodUpdate - - // contains the list of all configured sources - sourcesLock sync.Mutex - sources sets.Set[string] -} - -// NewPodConfig creates an object that can merge many configuration sources into a stream -// of normalized updates to a pod configuration. 
-func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder, startupSLIObserver podStartupSLIObserver) *PodConfig { - updates := make(chan kubetypes.PodUpdate, 50) - storage := newPodStorage(updates, mode, recorder, startupSLIObserver) - podConfig := &PodConfig{ - pods: storage, - mux: newMux(storage), - updates: updates, - sources: sets.Set[string]{}, - } - return podConfig -} - -// Channel creates or returns a config source channel. The channel -// only accepts PodUpdates -func (c *PodConfig) Channel(ctx context.Context, source string) chan<- interface{} { - c.sourcesLock.Lock() - defer c.sourcesLock.Unlock() - c.sources.Insert(source) - return c.mux.ChannelWithContext(ctx, source) -} - -// SeenAllSources returns true if seenSources contains all sources in the -// config, and also this config has received a SET message from each source. -func (c *PodConfig) SeenAllSources(seenSources sets.Set[string]) bool { - if c.pods == nil { - return false - } - c.sourcesLock.Lock() - defer c.sourcesLock.Unlock() - klog.V(5).InfoS("Looking for sources, have seen", "sources", sets.List(c.sources), "seenSources", seenSources) - return seenSources.HasAll(sets.List(c.sources)...) && c.pods.seenSources(sets.List(c.sources)...) -} - -// Updates returns a channel of updates to the configuration, properly denormalized. -func (c *PodConfig) Updates() <-chan kubetypes.PodUpdate { - return c.updates -} - -// Sync requests the full configuration be delivered to the update channel. -func (c *PodConfig) Sync() { - c.pods.sync() -} - -// podStorage manages the current pod state at any point in time and ensures updates -// to the channel are delivered in order. Note that this object is an in-memory source of -// "truth" and on creation contains zero entries. Once all previously read sources are -// available, then this object should be considered authoritative. -type podStorage struct { - podLock sync.RWMutex - // map of source name to pod uid to pod reference - pods map[string]map[types.UID]*v1.Pod - mode PodConfigNotificationMode - - // ensures that updates are delivered in strict order - // on the updates channel - updateLock sync.Mutex - updates chan<- kubetypes.PodUpdate - - // contains the set of all sources that have sent at least one SET - sourcesSeenLock sync.RWMutex - sourcesSeen sets.Set[string] - - // the EventRecorder to use - recorder record.EventRecorder - - startupSLIObserver podStartupSLIObserver -} - -// TODO: PodConfigNotificationMode could be handled by a listener to the updates channel -// in the future, especially with multiple listeners. -// TODO: allow initialization of the current state of the store with snapshotted version. -func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder, startupSLIObserver podStartupSLIObserver) *podStorage { - return &podStorage{ - pods: make(map[string]map[types.UID]*v1.Pod), - mode: mode, - updates: updates, - sourcesSeen: sets.Set[string]{}, - recorder: recorder, - startupSLIObserver: startupSLIObserver, - } -} - -// Merge normalizes a set of incoming changes from different sources into a map of all Pods -// and ensures that redundant changes are filtered out, and then pushes zero or more minimal -// updates onto the update channel. Ensures that updates are delivered in order. 
-func (s *podStorage) Merge(source string, change interface{}) error { - s.updateLock.Lock() - defer s.updateLock.Unlock() - - seenBefore := s.sourcesSeen.Has(source) - adds, updates, deletes, removes, reconciles := s.merge(source, change) - firstSet := !seenBefore && s.sourcesSeen.Has(source) - - // deliver update notifications - switch s.mode { - case PodConfigNotificationIncremental: - if len(removes.Pods) > 0 { - s.updates <- *removes - } - if len(adds.Pods) > 0 { - s.updates <- *adds - } - if len(updates.Pods) > 0 { - s.updates <- *updates - } - if len(deletes.Pods) > 0 { - s.updates <- *deletes - } - if firstSet && len(adds.Pods) == 0 && len(updates.Pods) == 0 && len(deletes.Pods) == 0 { - // Send an empty update when first seeing the source and there are - // no ADD or UPDATE or DELETE pods from the source. This signals kubelet that - // the source is ready. - s.updates <- *adds - } - // Only add reconcile support here, because kubelet doesn't support Snapshot update now. - if len(reconciles.Pods) > 0 { - s.updates <- *reconciles - } - - case PodConfigNotificationSnapshotAndUpdates: - if len(removes.Pods) > 0 || len(adds.Pods) > 0 || firstSet { - s.updates <- kubetypes.PodUpdate{Pods: s.mergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source} - } - if len(updates.Pods) > 0 { - s.updates <- *updates - } - if len(deletes.Pods) > 0 { - s.updates <- *deletes - } - - case PodConfigNotificationSnapshot: - if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 || len(removes.Pods) > 0 || firstSet { - s.updates <- kubetypes.PodUpdate{Pods: s.mergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source} - } - - case PodConfigNotificationUnknown: - fallthrough - default: - panic(fmt.Sprintf("unsupported PodConfigNotificationMode: %#v", s.mode)) - } - - return nil -} - -func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes, removes, reconciles *kubetypes.PodUpdate) { - s.podLock.Lock() - defer s.podLock.Unlock() - - addPods := []*v1.Pod{} - updatePods := []*v1.Pod{} - deletePods := []*v1.Pod{} - removePods := []*v1.Pod{} - reconcilePods := []*v1.Pod{} - - pods := s.pods[source] - if pods == nil { - pods = make(map[types.UID]*v1.Pod) - } - - // updatePodFunc is the local function which updates the pod cache *oldPods* with new pods *newPods*. - // After updated, new pod will be stored in the pod cache *pods*. - // Notice that *pods* and *oldPods* could be the same cache. - updatePodsFunc := func(newPods []*v1.Pod, oldPods, pods map[types.UID]*v1.Pod) { - filtered := filterInvalidPods(newPods, source, s.recorder) - for _, ref := range filtered { - // Annotate the pod with the source before any comparison. 
- if ref.Annotations == nil { - ref.Annotations = make(map[string]string) - } - ref.Annotations[kubetypes.ConfigSourceAnnotationKey] = source - // ignore static pods - if !kubetypes.IsStaticPod(ref) { - s.startupSLIObserver.ObservedPodOnWatch(ref, time.Now()) - } - if existing, found := oldPods[ref.UID]; found { - pods[ref.UID] = existing - needUpdate, needReconcile, needGracefulDelete := checkAndUpdatePod(existing, ref) - if needUpdate { - updatePods = append(updatePods, existing) - } else if needReconcile { - reconcilePods = append(reconcilePods, existing) - } else if needGracefulDelete { - deletePods = append(deletePods, existing) - } - continue - } - recordFirstSeenTime(ref) - pods[ref.UID] = ref - addPods = append(addPods, ref) - } - } - - update := change.(kubetypes.PodUpdate) - switch update.Op { - case kubetypes.ADD, kubetypes.UPDATE, kubetypes.DELETE: - if update.Op == kubetypes.ADD { - klog.V(4).InfoS("Adding new pods from source", "source", source, "pods", klog.KObjSlice(update.Pods)) - } else if update.Op == kubetypes.DELETE { - klog.V(4).InfoS("Gracefully deleting pods from source", "source", source, "pods", klog.KObjSlice(update.Pods)) - } else { - klog.V(4).InfoS("Updating pods from source", "source", source, "pods", klog.KObjSlice(update.Pods)) - } - updatePodsFunc(update.Pods, pods, pods) - - case kubetypes.REMOVE: - klog.V(4).InfoS("Removing pods from source", "source", source, "pods", klog.KObjSlice(update.Pods)) - for _, value := range update.Pods { - if existing, found := pods[value.UID]; found { - // this is a delete - delete(pods, value.UID) - removePods = append(removePods, existing) - continue - } - // this is a no-op - } - - case kubetypes.SET: - klog.V(4).InfoS("Setting pods for source", "source", source) - s.markSourceSet(source) - // Clear the old map entries by just creating a new map - oldPods := pods - pods = make(map[types.UID]*v1.Pod) - updatePodsFunc(update.Pods, oldPods, pods) - for uid, existing := range oldPods { - if _, found := pods[uid]; !found { - // this is a delete - removePods = append(removePods, existing) - } - } - - default: - klog.InfoS("Received invalid update type", "type", update) - - } - - s.pods[source] = pods - - adds = &kubetypes.PodUpdate{Op: kubetypes.ADD, Pods: copyPods(addPods), Source: source} - updates = &kubetypes.PodUpdate{Op: kubetypes.UPDATE, Pods: copyPods(updatePods), Source: source} - deletes = &kubetypes.PodUpdate{Op: kubetypes.DELETE, Pods: copyPods(deletePods), Source: source} - removes = &kubetypes.PodUpdate{Op: kubetypes.REMOVE, Pods: copyPods(removePods), Source: source} - reconciles = &kubetypes.PodUpdate{Op: kubetypes.RECONCILE, Pods: copyPods(reconcilePods), Source: source} - - return adds, updates, deletes, removes, reconciles -} - -func (s *podStorage) markSourceSet(source string) { - s.sourcesSeenLock.Lock() - defer s.sourcesSeenLock.Unlock() - s.sourcesSeen.Insert(source) -} - -func (s *podStorage) seenSources(sources ...string) bool { - s.sourcesSeenLock.RLock() - defer s.sourcesSeenLock.RUnlock() - return s.sourcesSeen.HasAll(sources...) -} - -func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecorder) (filtered []*v1.Pod) { - names := sets.Set[string]{} - for i, pod := range pods { - // Pods from each source are assumed to have passed validation individually. - // This function only checks if there is any naming conflict. 
- name := kubecontainer.GetPodFullName(pod) - if names.Has(name) { - klog.InfoS("Pod failed validation due to duplicate pod name, ignoring", "index", i, "pod", klog.KObj(pod), "source", source) - recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s due to duplicate pod name %q, ignoring", format.Pod(pod), source, pod.Name) - continue - } else { - names.Insert(name) - } - - filtered = append(filtered, pod) - } - return -} - -// Annotations that the kubelet adds to the pod. -var localAnnotations = []string{ - kubetypes.ConfigSourceAnnotationKey, - kubetypes.ConfigMirrorAnnotationKey, - kubetypes.ConfigFirstSeenAnnotationKey, -} - -func isLocalAnnotationKey(key string) bool { - for _, localKey := range localAnnotations { - if key == localKey { - return true - } - } - return false -} - -// isAnnotationMapEqual returns true if the existing annotation Map is equal to candidate except -// for local annotations. -func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool { - if candidateMap == nil { - candidateMap = make(map[string]string) - } - for k, v := range candidateMap { - if isLocalAnnotationKey(k) { - continue - } - if existingValue, ok := existingMap[k]; ok && existingValue == v { - continue - } - return false - } - for k := range existingMap { - if isLocalAnnotationKey(k) { - continue - } - // stale entry in existing map. - if _, exists := candidateMap[k]; !exists { - return false - } - } - return true -} - -// recordFirstSeenTime records the first seen time of this pod. -func recordFirstSeenTime(pod *v1.Pod) { - klog.V(4).InfoS("Receiving a new pod", "pod", klog.KObj(pod)) - pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString() -} - -// updateAnnotations returns an Annotation map containing the api annotation map plus -// locally managed annotations -func updateAnnotations(existing, ref *v1.Pod) { - annotations := make(map[string]string, len(ref.Annotations)+len(localAnnotations)) - for k, v := range ref.Annotations { - annotations[k] = v - } - for _, k := range localAnnotations { - if v, ok := existing.Annotations[k]; ok { - annotations[k] = v - } - } - existing.Annotations = annotations -} - -func podsDifferSemantically(existing, ref *v1.Pod) bool { - if reflect.DeepEqual(existing.Spec, ref.Spec) && - reflect.DeepEqual(existing.Labels, ref.Labels) && - reflect.DeepEqual(existing.DeletionTimestamp, ref.DeletionTimestamp) && - reflect.DeepEqual(existing.DeletionGracePeriodSeconds, ref.DeletionGracePeriodSeconds) && - isAnnotationMapEqual(existing.Annotations, ref.Annotations) { - return false - } - return true -} - -// checkAndUpdatePod updates existing, and: -// - if ref makes a meaningful change, returns needUpdate=true -// - if ref makes a meaningful change, and this change is graceful deletion, returns needGracefulDelete=true -// - if ref makes no meaningful change, but changes the pod status, returns needReconcile=true -// - else return all false -// Now, needUpdate, needGracefulDelete and needReconcile should never be both true -func checkAndUpdatePod(existing, ref *v1.Pod) (needUpdate, needReconcile, needGracefulDelete bool) { - - // 1. 
this is a reconcile - // TODO: it would be better to update the whole object and only preserve certain things - // like the source annotation or the UID (to ensure safety) - if !podsDifferSemantically(existing, ref) { - // this is not an update - // Only check reconcile when it is not an update, because if the pod is going to - // be updated, an extra reconcile is unnecessary - if !reflect.DeepEqual(existing.Status, ref.Status) { - // Pod with changed pod status needs reconcile, because kubelet should - // be the source of truth of pod status. - existing.Status = ref.Status - needReconcile = true - } - return - } - - // Overwrite the first-seen time with the existing one. This is our own - // internal annotation, there is no need to update. - ref.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = existing.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] - - existing.Spec = ref.Spec - existing.Labels = ref.Labels - existing.DeletionTimestamp = ref.DeletionTimestamp - existing.DeletionGracePeriodSeconds = ref.DeletionGracePeriodSeconds - existing.Generation = ref.Generation - existing.Status = ref.Status - updateAnnotations(existing, ref) - - // 2. this is an graceful delete - if ref.DeletionTimestamp != nil { - needGracefulDelete = true - } else { - // 3. this is an update - needUpdate = true - } - - return -} - -// sync sends a copy of the current state through the update channel. -func (s *podStorage) sync() { - s.updateLock.Lock() - defer s.updateLock.Unlock() - s.updates <- kubetypes.PodUpdate{Pods: s.mergedState().([]*v1.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource} -} - -func (s *podStorage) mergedState() interface{} { - s.podLock.RLock() - defer s.podLock.RUnlock() - pods := make([]*v1.Pod, 0) - for _, sourcePods := range s.pods { - for _, podRef := range sourcePods { - pods = append(pods, podRef.DeepCopy()) - } - } - return pods -} - -func copyPods(sourcePods []*v1.Pod) []*v1.Pod { - pods := []*v1.Pod{} - for _, source := range sourcePods { - // Use a deep copy here just in case - pods = append(pods, source.DeepCopy()) - } - return pods -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go deleted file mode 100644 index b8788f9e431..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -// Defines sane defaults for the kubelet config. 
-const ( - DefaultKubeletPodsDirName = "pods" - DefaultKubeletVolumesDirName = "volumes" - DefaultKubeletVolumeSubpathsDirName = "volume-subpaths" - DefaultKubeletVolumeDevicesDirName = "volumeDevices" - DefaultKubeletPluginsDirName = "plugins" - DefaultKubeletPluginsRegistrationDirName = "plugins_registry" - DefaultKubeletContainersDirName = "containers" - DefaultKubeletPluginContainersDirName = "plugin-containers" - DefaultKubeletPodResourcesDirName = "pod-resources" - KubeletPluginsDirSELinuxLabel = "system_u:object_r:container_file_t:s0" - KubeletContainersSharedSELinuxLabel = "system_u:object_r:container_file_t:s0" - DefaultKubeletCheckpointsDirName = "checkpoints" - DefaultKubeletUserNamespacesIDsPerPod = 65536 -) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/doc.go deleted file mode 100644 index 1d839ba325e..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config implements the pod configuration readers. -package config diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go deleted file mode 100644 index 79e2af6ed62..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "k8s.io/klog/v2" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/apis/core" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - utilio "k8s.io/utils/io" -) - -type podEventType int - -const ( - podAdd podEventType = iota - podModify - podDelete - - eventBufferLen = 10 -) - -type watchEvent struct { - fileName string - eventType podEventType -} - -type sourceFile struct { - path string - nodeName types.NodeName - period time.Duration - store cache.Store - fileKeyMapping map[string]string - updates chan<- interface{} - watchEvents chan *watchEvent -} - -// NewSourceFile watches a config file for changes. 
-func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) { - // "github.com/sigma/go-inotify" requires a path without trailing "/" - path = strings.TrimRight(path, string(os.PathSeparator)) - - config := newSourceFile(path, nodeName, period, updates) - klog.V(1).InfoS("Watching path", "path", path) - config.run() -} - -func newSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) *sourceFile { - send := func(objs []interface{}) { - var pods []*v1.Pod - for _, o := range objs { - pods = append(pods, o.(*v1.Pod)) - } - updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource} - } - store := cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc) - return &sourceFile{ - path: path, - nodeName: nodeName, - period: period, - store: store, - fileKeyMapping: map[string]string{}, - updates: updates, - watchEvents: make(chan *watchEvent, eventBufferLen), - } -} - -func (s *sourceFile) run() { - listTicker := time.NewTicker(s.period) - - go func() { - // Read path immediately to speed up startup. - if err := s.listConfig(); err != nil { - klog.ErrorS(err, "Unable to read config path", "path", s.path) - } - for { - select { - case <-listTicker.C: - if err := s.listConfig(); err != nil { - klog.ErrorS(err, "Unable to read config path", "path", s.path) - } - case e := <-s.watchEvents: - if err := s.consumeWatchEvent(e); err != nil { - klog.ErrorS(err, "Unable to process watch event") - } - } - } - }() - - s.startWatch() -} - -func (s *sourceFile) applyDefaults(pod *api.Pod, source string) error { - return applyDefaults(pod, source, true, s.nodeName) -} - -func (s *sourceFile) listConfig() error { - path := s.path - statInfo, err := os.Stat(path) - if err != nil { - if !os.IsNotExist(err) { - return err - } - // Emit an update with an empty PodList to allow FileSource to be marked as seen - s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource} - return fmt.Errorf("path does not exist, ignoring") - } - - switch { - case statInfo.Mode().IsDir(): - pods, err := s.extractFromDir(path) - if err != nil { - return err - } - if len(pods) == 0 { - // Emit an update with an empty PodList to allow FileSource to be marked as seen - s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource} - return nil - } - return s.replaceStore(pods...) - - case statInfo.Mode().IsRegular(): - pod, err := s.extractFromFile(path) - if err != nil { - return err - } - return s.replaceStore(pod) - - default: - return fmt.Errorf("path is not a directory or file") - } -} - -// Get as many pod manifests as we can from a directory. Return an error if and only if something -// prevented us from reading anything at all. Do not return an error if only some files -// were problematic. 
-func (s *sourceFile) extractFromDir(name string) ([]*v1.Pod, error) { - dirents, err := filepath.Glob(filepath.Join(name, "[^.]*")) - if err != nil { - return nil, fmt.Errorf("glob failed: %v", err) - } - - pods := make([]*v1.Pod, 0, len(dirents)) - if len(dirents) == 0 { - return pods, nil - } - - sort.Strings(dirents) - for _, path := range dirents { - statInfo, err := os.Stat(path) - if err != nil { - klog.ErrorS(err, "Could not get metadata", "path", path) - continue - } - - switch { - case statInfo.Mode().IsDir(): - klog.ErrorS(nil, "Provided manifest path is a directory, not recursing into manifest path", "path", path) - case statInfo.Mode().IsRegular(): - pod, err := s.extractFromFile(path) - if err != nil { - if !os.IsNotExist(err) { - klog.ErrorS(err, "Could not process manifest file", "path", path) - } - } else { - pods = append(pods, pod) - } - default: - klog.ErrorS(nil, "Manifest path is not a directory or file", "path", path, "mode", statInfo.Mode()) - } - } - return pods, nil -} - -// extractFromFile parses a file for Pod configuration information. -func (s *sourceFile) extractFromFile(filename string) (pod *v1.Pod, err error) { - klog.V(3).InfoS("Reading config file", "path", filename) - defer func() { - if err == nil && pod != nil { - objKey, keyErr := cache.MetaNamespaceKeyFunc(pod) - if keyErr != nil { - err = keyErr - return - } - s.fileKeyMapping[filename] = objKey - } - }() - - file, err := os.Open(filename) - if err != nil { - return pod, err - } - defer file.Close() - - data, err := utilio.ReadAtMost(file, maxConfigLength) - if err != nil { - return pod, err - } - - defaultFn := func(pod *api.Pod) error { - return s.applyDefaults(pod, filename) - } - - parsed, pod, podErr := tryDecodeSinglePod(data, defaultFn) - if parsed { - if podErr != nil { - return pod, podErr - } - return pod, nil - } - - return pod, fmt.Errorf("%v: couldn't parse as pod(%v), please check config file", filename, podErr) -} - -func (s *sourceFile) replaceStore(pods ...*v1.Pod) (err error) { - objs := []interface{}{} - for _, pod := range pods { - objs = append(objs, pod) - } - return s.store.Replace(objs, "") -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux.go deleted file mode 100644 index 42d86f86872..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux.go +++ /dev/null @@ -1,154 +0,0 @@ -//go:build linux -// +build linux - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/fsnotify/fsnotify" - "k8s.io/klog/v2" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/flowcontrol" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" -) - -const ( - retryPeriod = 1 * time.Second - maxRetryPeriod = 20 * time.Second -) - -type retryableError struct { - message string -} - -func (e *retryableError) Error() string { - return e.message -} - -func (s *sourceFile) startWatch() { - backOff := flowcontrol.NewBackOff(retryPeriod, maxRetryPeriod) - backOffID := "watch" - - go wait.Forever(func() { - if backOff.IsInBackOffSinceUpdate(backOffID, time.Now()) { - return - } - - if err := s.doWatch(); err != nil { - klog.ErrorS(err, "Unable to read config path", "path", s.path) - if _, retryable := err.(*retryableError); !retryable { - backOff.Next(backOffID, time.Now()) - } - } - }, retryPeriod) -} - -func (s *sourceFile) doWatch() error { - _, err := os.Stat(s.path) - if err != nil { - if !os.IsNotExist(err) { - return err - } - // Emit an update with an empty PodList to allow FileSource to be marked as seen - s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource} - return &retryableError{"path does not exist, ignoring"} - } - - w, err := fsnotify.NewWatcher() - if err != nil { - return fmt.Errorf("unable to create inotify: %v", err) - } - defer w.Close() - - err = w.Add(s.path) - if err != nil { - return fmt.Errorf("unable to create inotify for path %q: %v", s.path, err) - } - - for { - select { - case event := <-w.Events: - if err = s.produceWatchEvent(&event); err != nil { - return fmt.Errorf("error while processing inotify event (%+v): %v", event, err) - } - case err = <-w.Errors: - return fmt.Errorf("error while watching %q: %v", s.path, err) - } - } -} - -func (s *sourceFile) produceWatchEvent(e *fsnotify.Event) error { - // Ignore file start with dots - if strings.HasPrefix(filepath.Base(e.Name), ".") { - klog.V(4).InfoS("Ignored pod manifest, because it starts with dots", "eventName", e.Name) - return nil - } - var eventType podEventType - switch { - case (e.Op & fsnotify.Create) > 0: - eventType = podAdd - case (e.Op & fsnotify.Write) > 0: - eventType = podModify - case (e.Op & fsnotify.Chmod) > 0: - eventType = podModify - case (e.Op & fsnotify.Remove) > 0: - eventType = podDelete - case (e.Op & fsnotify.Rename) > 0: - eventType = podDelete - default: - // Ignore rest events - return nil - } - - s.watchEvents <- &watchEvent{e.Name, eventType} - return nil -} - -func (s *sourceFile) consumeWatchEvent(e *watchEvent) error { - switch e.eventType { - case podAdd, podModify: - pod, err := s.extractFromFile(e.fileName) - if err != nil { - return fmt.Errorf("can't process config file %q: %v", e.fileName, err) - } - return s.store.Add(pod) - case podDelete: - if objKey, keyExist := s.fileKeyMapping[e.fileName]; keyExist { - pod, podExist, err := s.store.GetByKey(objKey) - if err != nil { - return err - } - if !podExist { - return fmt.Errorf("the pod with key %s doesn't exist in cache", objKey) - } - if err = s.store.Delete(pod); err != nil { - return fmt.Errorf("failed to remove deleted pod from cache: %v", err) - } - delete(s.fileKeyMapping, e.fileName) - } - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_unsupported.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_unsupported.go deleted file mode 100644 index 7d5c77c28d7..00000000000 --- 
a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_unsupported.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build !linux -// +build !linux - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - - "k8s.io/klog/v2" -) - -func (s *sourceFile) startWatch() { - klog.ErrorS(nil, "Watching source file is unsupported in this build") -} - -func (s *sourceFile) consumeWatchEvent(e *watchEvent) error { - return fmt.Errorf("consuming watch event is unsupported in this build") -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/flags.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/flags.go deleted file mode 100644 index 4ff13cacf0a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/flags.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - - "github.com/spf13/pflag" -) - -// ContainerRuntimeOptions defines options for the container runtime. -type ContainerRuntimeOptions struct { - // General Options. - - // RuntimeCgroups that container runtime is expected to be isolated in. - RuntimeCgroups string - // PodSandboxImage is the image whose network/ipc namespaces - // containers in each pod will use. - PodSandboxImage string - // Image credential provider plugin options - - // ImageCredentialProviderConfigFile is the path to the credential provider plugin config file. - // This config file is a specification for what credential providers are enabled and invoked - // by the kubelet. The plugin config should contain information about what plugin binary - // to execute and what container images the plugin should be called for. - // +optional - ImageCredentialProviderConfigFile string - // ImageCredentialProviderBinDir is the path to the directory where credential provider plugin - // binaries exist. The name of each plugin binary is expected to match the name of the plugin - // specified in imageCredentialProviderConfigFile. - // +optional - ImageCredentialProviderBinDir string -} - -// AddFlags adds flags to the container runtime, according to ContainerRuntimeOptions. -func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) { - // General settings. 
- fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.") - fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, fmt.Sprintf("Specified image will not be pruned by the image garbage collector. CRI implementations have their own configuration to set this image.")) - _ = fs.MarkDeprecated("pod-infra-container-image", "will be removed in 1.35. Image garbage collector will get sandbox image information from CRI.") - - // Image credential provider settings. - fs.StringVar(&s.ImageCredentialProviderConfigFile, "image-credential-provider-config", s.ImageCredentialProviderConfigFile, "The path to the credential provider plugin config file.") - fs.StringVar(&s.ImageCredentialProviderBinDir, "image-credential-provider-bin-dir", s.ImageCredentialProviderBinDir, "The path to the directory where credential provider plugin binaries are located.") -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go deleted file mode 100644 index b3760878143..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "bytes" - "fmt" - "net/http" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" - api "k8s.io/kubernetes/pkg/apis/core" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" - utilio "k8s.io/utils/io" -) - -type sourceURL struct { - url string - header http.Header - nodeName types.NodeName - updates chan<- interface{} - data []byte - failureLogs int - client *http.Client -} - -// NewSourceURL specifies the URL where to read the Pod configuration from, then watches it for changes. -func NewSourceURL(url string, header http.Header, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) { - config := &sourceURL{ - url: url, - header: header, - nodeName: nodeName, - updates: updates, - data: nil, - // Timing out requests leads to retries. This client is only used to - // read the manifest URL passed to kubelet. - client: &http.Client{Timeout: 10 * time.Second}, - } - klog.V(1).InfoS("Watching URL", "URL", url) - go wait.Until(config.run, period, wait.NeverStop) -} - -func (s *sourceURL) run() { - if err := s.extractFromURL(); err != nil { - // Don't log this multiple times per minute. The first few entries should be - // enough to get the point across. - if s.failureLogs < 3 { - klog.InfoS("Failed to read pods from URL", "err", err) - } else if s.failureLogs == 3 { - klog.InfoS("Failed to read pods from URL. 
Dropping verbosity of this message to V(4)", "err", err) - } else { - klog.V(4).InfoS("Failed to read pods from URL", "err", err) - } - s.failureLogs++ - } else { - if s.failureLogs > 0 { - klog.InfoS("Successfully read pods from URL") - s.failureLogs = 0 - } - } -} - -func (s *sourceURL) applyDefaults(pod *api.Pod) error { - return applyDefaults(pod, s.url, false, s.nodeName) -} - -func (s *sourceURL) extractFromURL() error { - req, err := http.NewRequest("GET", s.url, nil) - if err != nil { - return err - } - req.Header = s.header - resp, err := s.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - data, err := utilio.ReadAtMost(resp.Body, maxConfigLength) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%v: %v", s.url, resp.Status) - } - if len(data) == 0 { - // Emit an update with an empty PodList to allow HTTPSource to be marked as seen - s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource} - return fmt.Errorf("zero-length data received from %v", s.url) - } - // Short circuit if the data has not changed since the last time it was read. - if bytes.Equal(data, s.data) { - return nil - } - s.data = data - - // First try as it is a single pod. - parsed, pod, singlePodErr := tryDecodeSinglePod(data, s.applyDefaults) - if parsed { - if singlePodErr != nil { - // It parsed but could not be used. - return singlePodErr - } - s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource} - return nil - } - - // That didn't work, so try a list of pods. - parsed, podList, multiPodErr := tryDecodePodList(data, s.applyDefaults) - if parsed { - if multiPodErr != nil { - // It parsed but could not be used. - return multiPodErr - } - pods := make([]*v1.Pod, 0, len(podList.Items)) - for i := range podList.Items { - pods = append(pods, &podList.Items[i]) - } - s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.HTTPSource} - return nil - } - - return fmt.Errorf("%v: received '%v', but couldn't parse as "+ - "single (%v) or multiple pods (%v)", - s.url, string(data), singlePodErr, multiPodErr) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/mux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/mux.go deleted file mode 100644 index a2b3e1e0a5f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/mux.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "context" - "sync" - - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" -) - -type merger interface { - // Invoked when a change from a source is received. May also function as an incremental - // merger if you wish to consume changes incrementally. Must be reentrant when more than - // one source is defined. - Merge(source string, update interface{}) error -} - -// mux is a class for merging configuration from multiple sources. 
Changes are -// pushed via channels and sent to the merge function. -type mux struct { - // Invoked when an update is sent to a source. - merger merger - - // Sources and their lock. - sourceLock sync.RWMutex - // Maps source names to channels - sources map[string]chan interface{} -} - -// newMux creates a new mux that can merge changes from multiple sources. -func newMux(merger merger) *mux { - mux := &mux{ - sources: make(map[string]chan interface{}), - merger: merger, - } - return mux -} - -// ChannelWithContext returns a channel where a configuration source -// can send updates of new configurations. Multiple calls with the same -// source will return the same channel. This allows change and state based sources -// to use the same channel. Different source names however will be treated as a -// union. -func (m *mux) ChannelWithContext(ctx context.Context, source string) chan interface{} { - if len(source) == 0 { - panic("Channel given an empty name") - } - m.sourceLock.Lock() - defer m.sourceLock.Unlock() - channel, exists := m.sources[source] - if exists { - return channel - } - newChannel := make(chan interface{}) - m.sources[source] = newChannel - - go wait.Until(func() { m.listen(source, newChannel) }, 0, ctx.Done()) - return newChannel -} - -func (m *mux) listen(source string, listenChannel <-chan interface{}) { - for update := range listenChannel { - if err := m.merger.Merge(source, update); err != nil { - klog.InfoS("failed merging update", "err", err) - } - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/sources.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/sources.go deleted file mode 100644 index f1ce89adde7..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/sources.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package config implements the pod configuration readers. -package config - -import ( - "sync" - - "k8s.io/apimachinery/pkg/util/sets" -) - -// SourcesReadyFn is function that returns true if the specified sources have been seen. -type SourcesReadyFn func(sourcesSeen sets.Set[string]) bool - -// SourcesReady tracks the set of configured sources seen by the kubelet. -type SourcesReady interface { - // AddSource adds the specified source to the set of sources managed. - AddSource(source string) - // AllReady returns true if the currently configured sources have all been seen. - AllReady() bool -} - -// NewSourcesReady returns a SourcesReady with the specified function. -func NewSourcesReady(sourcesReadyFn SourcesReadyFn) SourcesReady { - return &sourcesImpl{ - sourcesSeen: sets.New[string](), - sourcesReadyFn: sourcesReadyFn, - } -} - -// sourcesImpl implements SourcesReady. It is thread-safe. -type sourcesImpl struct { - // lock protects access to sources seen. - lock sync.RWMutex - // set of sources seen. - sourcesSeen sets.Set[string] - // sourcesReady is a function that evaluates if the sources are ready. 
- sourcesReadyFn SourcesReadyFn -} - -// Add adds the specified source to the set of sources managed. -func (s *sourcesImpl) AddSource(source string) { - s.lock.Lock() - defer s.lock.Unlock() - s.sourcesSeen.Insert(source) -} - -// AllReady returns true if each configured source is ready. -func (s *sourcesImpl) AllReady() bool { - s.lock.RLock() - defer s.lock.RUnlock() - return s.sourcesReadyFn(s.sourcesSeen) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/.mockery.yaml b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/.mockery.yaml deleted file mode 100644 index 84489a0095f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/.mockery.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -dir: testing -filename: "mock_{{.InterfaceName | snakecase}}.go" -boilerplate-file: ../../../hack/boilerplate/boilerplate.generatego.txt -outpkg: testing -with-expecter: true -packages: - io/fs: - interfaces: - DirEntry: - config: - filename: mockdirentry.go - k8s.io/kubernetes/pkg/kubelet/container: - interfaces: - Runtime: - config: - filename: runtime_mock.go - RuntimeCache: diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/cache.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/cache.go deleted file mode 100644 index 9b4138634d3..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/cache.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package container - -import ( - "sync" - "time" - - "k8s.io/apimachinery/pkg/types" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/features" -) - -// Cache stores the PodStatus for the pods. It represents *all* the visible -// pods/containers in the container runtime. All cache entries are at least as -// new or newer than the global timestamp (set by UpdateTime()), while -// individual entries may be slightly newer than the global timestamp. If a pod -// has no states known by the runtime, Cache returns an empty PodStatus object -// with ID populated. -// -// Cache provides two methods to retrieve the PodStatus: the non-blocking Get() -// and the blocking GetNewerThan() method. The component responsible for -// populating the cache is expected to call Delete() to explicitly free the -// cache entries. -type Cache interface { - Get(types.UID) (*PodStatus, error) - // Set updates the cache by setting the PodStatus for the pod only - // if the data is newer than the cache based on the provided - // time stamp. Returns if the cache was updated. - Set(types.UID, *PodStatus, error, time.Time) (updated bool) - // GetNewerThan is a blocking call that only returns the status - // when it is newer than the given time. - GetNewerThan(types.UID, time.Time) (*PodStatus, error) - Delete(types.UID) - UpdateTime(time.Time) -} - -type data struct { - // Status of the pod. - status *PodStatus - // Error got when trying to inspect the pod. - err error - // Time when the data was last modified. 
-	modified time.Time
-}
-
-type subRecord struct {
-	time time.Time
-	ch   chan *data
-}
-
-// cache implements Cache.
-type cache struct {
-	// Lock which guards all internal data structures.
-	lock sync.RWMutex
-	// Map that stores the pod statuses.
-	pods map[types.UID]*data
-	// A global timestamp represents how fresh the cached data is. All
-	// cache content is at the least newer than this timestamp. Note that the
-	// timestamp is nil after initialization, and will only become non-nil when
-	// it is ready to serve the cached statuses.
-	timestamp *time.Time
-	// Map that stores the subscriber records.
-	subscribers map[types.UID][]*subRecord
-}
-
-// NewCache creates a pod cache.
-func NewCache() Cache {
-	return &cache{pods: map[types.UID]*data{}, subscribers: map[types.UID][]*subRecord{}}
-}
-
-// Get returns the PodStatus for the pod; callers are expected not to
-// modify the objects returned.
-func (c *cache) Get(id types.UID) (*PodStatus, error) {
-	c.lock.RLock()
-	defer c.lock.RUnlock()
-	d := c.get(id)
-	return d.status, d.err
-}
-
-func (c *cache) GetNewerThan(id types.UID, minTime time.Time) (*PodStatus, error) {
-	ch := c.subscribe(id, minTime)
-	d := <-ch
-	return d.status, d.err
-}
-
-// Set sets the PodStatus for the pod only if the data is newer than the cache
-func (c *cache) Set(id types.UID, status *PodStatus, err error, timestamp time.Time) (updated bool) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
-		// Set the value in the cache only if it's not present already
-		// or the timestamp in the cache is older than the current update timestamp
-		if cachedVal, ok := c.pods[id]; ok && cachedVal.modified.After(timestamp) {
-			return false
-		}
-	}
-
-	c.pods[id] = &data{status: status, err: err, modified: timestamp}
-	c.notify(id, timestamp)
-	return true
-}
-
-// Delete removes the entry of the pod.
-func (c *cache) Delete(id types.UID) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	delete(c.pods, id)
-}
-
-// UpdateTime modifies the global timestamp of the cache and notify
-// subscribers if needed.
-func (c *cache) UpdateTime(timestamp time.Time) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	c.timestamp = &timestamp
-	// Notify all the subscribers if the condition is met.
-	for id := range c.subscribers {
-		c.notify(id, *c.timestamp)
-	}
-}
-
-func makeDefaultData(id types.UID) *data {
-	return &data{status: &PodStatus{ID: id}, err: nil}
-}
-
-func (c *cache) get(id types.UID) *data {
-	d, ok := c.pods[id]
-	if !ok {
-		// Cache should store *all* pod/container information known by the
-		// container runtime. A cache miss indicates that there are no states
-		// regarding the pod last time we queried the container runtime.
-		// What this *really* means is that there are no visible pod/containers
-		// associated with this pod. Simply return an default (mostly empty)
-		// PodStatus to reflect this.
-		return makeDefaultData(id)
-	}
-	return d
-}
-
-// getIfNewerThan returns the data it is newer than the given time.
-// Otherwise, it returns nil. The caller should acquire the lock.
-func (c *cache) getIfNewerThan(id types.UID, minTime time.Time) *data {
-	d, ok := c.pods[id]
-	globalTimestampIsNewer := (c.timestamp != nil && c.timestamp.After(minTime))
-	if !ok && globalTimestampIsNewer {
-		// Status is not cached, but the global timestamp is newer than
-		// minTime, return the default status.
- return makeDefaultData(id) - } - if ok && (d.modified.After(minTime) || globalTimestampIsNewer) { - // Status is cached, return status if either of the following is true. - // * status was modified after minTime - // * the global timestamp of the cache is newer than minTime. - return d - } - // The pod status is not ready. - return nil -} - -// notify sends notifications for pod with the given id, if the requirements -// are met. Note that the caller should acquire the lock. -func (c *cache) notify(id types.UID, timestamp time.Time) { - list, ok := c.subscribers[id] - if !ok { - // No one to notify. - return - } - newList := []*subRecord{} - for i, r := range list { - if timestamp.Before(r.time) { - // Doesn't meet the time requirement; keep the record. - newList = append(newList, list[i]) - continue - } - r.ch <- c.get(id) - close(r.ch) - } - if len(newList) == 0 { - delete(c.subscribers, id) - } else { - c.subscribers[id] = newList - } -} - -func (c *cache) subscribe(id types.UID, timestamp time.Time) chan *data { - ch := make(chan *data, 1) - c.lock.Lock() - defer c.lock.Unlock() - d := c.getIfNewerThan(id, timestamp) - if d != nil { - // If the cache entry is ready, send the data and return immediately. - ch <- d - return ch - } - // Add the subscription record. - c.subscribers[id] = append(c.subscribers[id], &subRecord{time: timestamp, ch: ch}) - return ch -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go deleted file mode 100644 index b0a25d50058..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package container - -import ( - "context" - "fmt" - "time" - - "k8s.io/klog/v2" -) - -// GCPolicy specifies a policy for garbage collecting containers. -type GCPolicy struct { - // Minimum age at which a container can be garbage collected, zero for no limit. - MinAge time.Duration - - // Max number of dead containers any single pod (UID, container name) pair is - // allowed to have, less than zero for no limit. - MaxPerPodContainer int - - // Max number of total dead containers, less than zero for no limit. - MaxContainers int -} - -// GC manages garbage collection of dead containers. -// -// Implementation is thread-compatible. -type GC interface { - // Garbage collect containers. - GarbageCollect(ctx context.Context) error - // Deletes all unused containers, including containers belonging to pods that are terminated but not deleted - DeleteAllUnusedContainers(ctx context.Context) error -} - -// SourcesReadyProvider knows how to determine if configuration sources are ready -type SourcesReadyProvider interface { - // AllReady returns true if the currently configured sources have all been seen. - AllReady() bool -} - -// TODO(vmarmol): Preferentially remove pod infra containers. 
-type realContainerGC struct { - // Container runtime - runtime Runtime - - // Policy for garbage collection. - policy GCPolicy - - // sourcesReadyProvider provides the readiness of kubelet configuration sources. - sourcesReadyProvider SourcesReadyProvider -} - -// NewContainerGC creates a new instance of GC with the specified policy. -func NewContainerGC(runtime Runtime, policy GCPolicy, sourcesReadyProvider SourcesReadyProvider) (GC, error) { - if policy.MinAge < 0 { - return nil, fmt.Errorf("invalid minimum garbage collection age: %v", policy.MinAge) - } - - return &realContainerGC{ - runtime: runtime, - policy: policy, - sourcesReadyProvider: sourcesReadyProvider, - }, nil -} - -func (cgc *realContainerGC) GarbageCollect(ctx context.Context) error { - return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), false) -} - -func (cgc *realContainerGC) DeleteAllUnusedContainers(ctx context.Context) error { - klog.InfoS("Attempting to delete unused containers") - return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), true) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go deleted file mode 100644 index 9e6780310e6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go +++ /dev/null @@ -1,444 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package container - -import ( - "context" - "encoding/json" - "fmt" - "hash/fnv" - "strings" - - "k8s.io/klog/v2" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/tools/record" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - sc "k8s.io/kubernetes/pkg/securitycontext" - hashutil "k8s.io/kubernetes/pkg/util/hash" - "k8s.io/kubernetes/third_party/forked/golang/expansion" - utilsnet "k8s.io/utils/net" -) - -// HandlerRunner runs a lifecycle handler for a container. -type HandlerRunner interface { - Run(ctx context.Context, containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) -} - -// RuntimeHelper wraps kubelet to make container runtime -// able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. -type RuntimeHelper interface { - GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string, imageVolumes ImageVolumes) (contOpts *RunContainerOptions, cleanupAction func(), err error) - GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) - // GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host - // of a pod. 
-	GetPodCgroupParent(pod *v1.Pod) string
-	GetPodDir(podUID types.UID) string
-	GeneratePodHostNameAndDomain(pod *v1.Pod) (hostname string, hostDomain string, err error)
-	// GetExtraSupplementalGroupsForPod returns a list of the extra
-	// supplemental groups for the Pod. These extra supplemental groups come
-	// from annotations on persistent volumes that the pod depends on.
-	GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
-
-	// GetOrCreateUserNamespaceMappings returns the configuration for the sandbox user namespace
-	GetOrCreateUserNamespaceMappings(pod *v1.Pod, runtimeHandler string) (*runtimeapi.UserNamespace, error)
-
-	// PrepareDynamicResources prepares resources for a pod.
-	PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error
-
-	// UnprepareDynamicResources unprepares resources for a pod.
-	UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error
-
-	// SetPodWatchCondition flags a pod to be inspected until the condition is met.
-	SetPodWatchCondition(types.UID, string, func(*PodStatus) bool)
-}
-
-// ShouldContainerBeRestarted checks whether a container needs to be restarted.
-// TODO(yifan): Think about how to refactor this.
-func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus *PodStatus) bool {
-	// Once a pod has been marked deleted, it should not be restarted
-	if pod.DeletionTimestamp != nil {
-		return false
-	}
-	// Get latest container status.
-	status := podStatus.FindContainerStatusByName(container.Name)
-	// If the container was never started before, we should start it.
-	// NOTE(random-liu): If all historical containers were GC'd, we'll also return true here.
-	if status == nil {
-		return true
-	}
-	// Check whether container is running
-	if status.State == ContainerStateRunning {
-		return false
-	}
-	// Always restart container in the unknown, or in the created state.
-	if status.State == ContainerStateUnknown || status.State == ContainerStateCreated {
-		return true
-	}
-	// Check RestartPolicy for dead container
-	if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
-		klog.V(4).InfoS("Already ran container, do nothing", "pod", klog.KObj(pod), "containerName", container.Name)
-		return false
-	}
-	if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
-		// Check the exit code.
-		if status.ExitCode == 0 {
-			klog.V(4).InfoS("Already successfully ran container, do nothing", "pod", klog.KObj(pod), "containerName", container.Name)
-			return false
-		}
-	}
-	return true
-}
-
-// HashContainer returns the hash of the container. It is used to compare
-// the running container with its desired spec.
-// Note: remember to update hashValues in container_hash_test.go as well.
-func HashContainer(container *v1.Container) uint64 {
-	hash := fnv.New32a()
-	containerJSON, _ := json.Marshal(pickFieldsToHash(container))
-	hashutil.DeepHashObject(hash, containerJSON)
-	return uint64(hash.Sum32())
-}
-
-// pickFieldsToHash picks fields that will affect the running status of the container for hash,
-// currently this field range only contains `image` and `name`.
-// Note: this list must be updated if ever kubelet wants to allow mutations to other fields.
-func pickFieldsToHash(container *v1.Container) map[string]string {
-	retval := map[string]string{
-		"name":  container.Name,
-		"image": container.Image,
-	}
-	return retval
-}
-
-// envVarsToMap constructs a map of environment name to value from a slice
-// of env vars.
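As the pickFieldsToHash comment above notes, only the container name and image feed HashContainer, so spec changes to other fields leave the hash untouched. A short, hedged sketch (the import alias is an assumption):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	a := &v1.Container{Name: "app", Image: "nginx:1.25", Args: []string{"-v"}}
	b := &v1.Container{Name: "app", Image: "nginx:1.25"} // same name/image, different args
	// Args are not part of the hashed fields, so the two specs compare as identical.
	fmt.Println(kubecontainer.HashContainer(a) == kubecontainer.HashContainer(b)) // true
}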
-func envVarsToMap(envs []EnvVar) map[string]string { - result := map[string]string{} - for _, env := range envs { - result[env.Name] = env.Value - } - return result -} - -// v1EnvVarsToMap constructs a map of environment name to value from a slice -// of env vars. -func v1EnvVarsToMap(envs []v1.EnvVar) map[string]string { - result := map[string]string{} - for _, env := range envs { - result[env.Name] = env.Value - } - - return result -} - -// ExpandContainerCommandOnlyStatic substitutes only static environment variable values from the -// container environment definitions. This does *not* include valueFrom substitutions. -// TODO: callers should use ExpandContainerCommandAndArgs with a fully resolved list of environment. -func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVar) (command []string) { - mapping := expansion.MappingFuncFor(v1EnvVarsToMap(envs)) - if len(containerCommand) != 0 { - for _, cmd := range containerCommand { - command = append(command, expansion.Expand(cmd, mapping)) - } - } - return command -} - -// ExpandContainerVolumeMounts expands the subpath of the given VolumeMount by replacing variable references with the values of given EnvVar. -func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, error) { - - envmap := envVarsToMap(envs) - missingKeys := sets.New[string]() - expanded := expansion.Expand(mount.SubPathExpr, func(key string) string { - value, ok := envmap[key] - if !ok || len(value) == 0 { - missingKeys.Insert(key) - } - return value - }) - - if len(missingKeys) > 0 { - return "", fmt.Errorf("missing value for %s", strings.Join(sets.List(missingKeys), ", ")) - } - return expanded, nil -} - -// ExpandContainerCommandAndArgs expands the given Container's command by replacing variable references `with the values of given EnvVar. -func ExpandContainerCommandAndArgs(container *v1.Container, envs []EnvVar) (command []string, args []string) { - mapping := expansion.MappingFuncFor(envVarsToMap(envs)) - - if len(container.Command) != 0 { - for _, cmd := range container.Command { - command = append(command, expansion.Expand(cmd, mapping)) - } - } - - if len(container.Args) != 0 { - for _, arg := range container.Args { - args = append(args, expansion.Expand(arg, mapping)) - } - } - - return command, args -} - -// FilterEventRecorder creates an event recorder to record object's event except implicitly required container's, like infra container. -func FilterEventRecorder(recorder record.EventRecorder) record.EventRecorder { - return &innerEventRecorder{ - recorder: recorder, - } -} - -type innerEventRecorder struct { - recorder record.EventRecorder -} - -func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*v1.ObjectReference, bool) { - if ref, ok := object.(*v1.ObjectReference); ok { - // this check is needed AFTER the cast. 
See https://github.com/kubernetes/kubernetes/issues/95552 - if ref == nil { - return nil, false - } - if !strings.HasPrefix(ref.FieldPath, ImplicitContainerPrefix) { - return ref, true - } - } - return nil, false -} - -func (irecorder *innerEventRecorder) Event(object runtime.Object, eventtype, reason, message string) { - if ref, ok := irecorder.shouldRecordEvent(object); ok { - irecorder.recorder.Event(ref, eventtype, reason, message) - } -} - -func (irecorder *innerEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - if ref, ok := irecorder.shouldRecordEvent(object); ok { - irecorder.recorder.Eventf(ref, eventtype, reason, messageFmt, args...) - } - -} - -func (irecorder *innerEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - if ref, ok := irecorder.shouldRecordEvent(object); ok { - irecorder.recorder.AnnotatedEventf(ref, annotations, eventtype, reason, messageFmt, args...) - } - -} - -// IsHostNetworkPod returns whether the host networking requested for the given Pod. -// Pod must not be nil. -func IsHostNetworkPod(pod *v1.Pod) bool { - return pod.Spec.HostNetwork -} - -// ConvertPodStatusToRunningPod returns Pod given PodStatus and container runtime string. -// TODO(random-liu): Convert PodStatus to running Pod, should be deprecated soon -func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod { - runningPod := Pod{ - ID: podStatus.ID, - Name: podStatus.Name, - Namespace: podStatus.Namespace, - } - for _, containerStatus := range podStatus.ContainerStatuses { - if containerStatus.State != ContainerStateRunning { - continue - } - container := &Container{ - ID: containerStatus.ID, - Name: containerStatus.Name, - Image: containerStatus.Image, - ImageID: containerStatus.ImageID, - ImageRef: containerStatus.ImageRef, - ImageRuntimeHandler: containerStatus.ImageRuntimeHandler, - Hash: containerStatus.Hash, - State: containerStatus.State, - } - runningPod.Containers = append(runningPod.Containers, container) - } - - // Populate sandboxes in kubecontainer.Pod - for _, sandbox := range podStatus.SandboxStatuses { - runningPod.Sandboxes = append(runningPod.Sandboxes, &Container{ - ID: ContainerID{Type: runtimeName, ID: sandbox.Id}, - State: SandboxToContainerState(sandbox.State), - }) - } - return runningPod -} - -// SandboxToContainerState converts runtimeapi.PodSandboxState to -// kubecontainer.State. -// This is only needed because we need to return sandboxes as if they were -// kubecontainer.Containers to avoid substantial changes to PLEG. -// TODO: Remove this once it becomes obsolete. -func SandboxToContainerState(state runtimeapi.PodSandboxState) State { - switch state { - case runtimeapi.PodSandboxState_SANDBOX_READY: - return ContainerStateRunning - case runtimeapi.PodSandboxState_SANDBOX_NOTREADY: - return ContainerStateExited - } - return ContainerStateUnknown -} - -// GetContainerSpec gets the container spec by containerName. -func GetContainerSpec(pod *v1.Pod, containerName string) *v1.Container { - var containerSpec *v1.Container - podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool { - if containerName == c.Name { - containerSpec = c - return false - } - return true - }) - return containerSpec -} - -// HasPrivilegedContainer returns true if any of the containers in the pod are privileged. 
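The expansion helpers defined earlier in this file substitute only static $(VAR) references from the container spec; references with no static value are left verbatim. A minimal sketch (the import alias is an assumption):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	envs := []v1.EnvVar{{Name: "PORT", Value: "8080"}}
	cmd := kubecontainer.ExpandContainerCommandOnlyStatic(
		[]string{"/server", "--listen=$(PORT)", "--peer=$(UNSET)"}, envs)
	// $(PORT) expands; $(UNSET) has no static value and stays as-is.
	fmt.Println(cmd) // [/server --listen=8080 --peer=$(UNSET)]
}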
-func HasPrivilegedContainer(pod *v1.Pod) bool { - var hasPrivileged bool - podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool { - if c.SecurityContext != nil && c.SecurityContext.Privileged != nil && *c.SecurityContext.Privileged { - hasPrivileged = true - return false - } - return true - }) - return hasPrivileged -} - -// HasWindowsHostProcessContainer returns true if any of the containers in a pod are HostProcess containers. -func HasWindowsHostProcessContainer(pod *v1.Pod) bool { - var hasHostProcess bool - podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool { - if sc.HasWindowsHostProcessRequest(pod, c) { - hasHostProcess = true - return false - } - return true - }) - - return hasHostProcess -} - -// AllContainersAreWindowsHostProcess returns true if all containers in a pod are HostProcess containers. -func AllContainersAreWindowsHostProcess(pod *v1.Pod) bool { - allHostProcess := true - podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool { - if !sc.HasWindowsHostProcessRequest(pod, c) { - allHostProcess = false - return false - } - return true - }) - - return allHostProcess -} - -// MakePortMappings creates internal port mapping from api port mapping. -func MakePortMappings(container *v1.Container) (ports []PortMapping) { - names := make(map[string]struct{}) - for _, p := range container.Ports { - pm := PortMapping{ - HostPort: int(p.HostPort), - ContainerPort: int(p.ContainerPort), - Protocol: p.Protocol, - HostIP: p.HostIP, - } - - // We need to determine the address family this entry applies to. We do this to ensure - // duplicate containerPort / protocol rules work across different address families. - // https://github.com/kubernetes/kubernetes/issues/82373 - family := "any" - if p.HostIP != "" { - if utilsnet.IsIPv6String(p.HostIP) { - family = "v6" - } else { - family = "v4" - } - } - - var name = p.Name - if name == "" { - name = fmt.Sprintf("%s-%s-%s:%d:%d", family, p.Protocol, p.HostIP, p.ContainerPort, p.HostPort) - } - - // Protect against a port name being used more than once in a container. - if _, ok := names[name]; ok { - klog.InfoS("Port name conflicted, it is defined more than once", "portName", name) - continue - } - ports = append(ports, pm) - names[name] = struct{}{} - } - return -} - -// HasAnyRegularContainerStarted returns true if any regular container has -// started, which indicates all init containers have been initialized. -// Deprecated: This function is not accurate when its pod sandbox is recreated. -// Use HasAnyActiveRegularContainerStarted instead. -func HasAnyRegularContainerStarted(spec *v1.PodSpec, statuses []v1.ContainerStatus) bool { - if len(statuses) == 0 { - return false - } - - containerNames := make(map[string]struct{}) - for _, c := range spec.Containers { - containerNames[c.Name] = struct{}{} - } - - for _, status := range statuses { - if _, ok := containerNames[status.Name]; !ok { - continue - } - if status.State.Running != nil || status.State.Terminated != nil { - return true - } - } - - return false -} - -// HasAnyActiveRegularContainerStarted returns true if any regular container of -// the current pod sandbox has started, which indicates all init containers -// have been initialized. 
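MakePortMappings above synthesizes a family-protocol-hostIP:containerPort:hostPort name for unnamed ports, which is what lets the same containerPort/protocol pair coexist across address families. An illustrative sketch (the import alias is an assumption):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	c := &v1.Container{
		Name: "web",
		Ports: []v1.ContainerPort{
			// Same containerPort/protocol twice, bound to v4 and v6 host IPs.
			{ContainerPort: 80, HostPort: 8080, Protocol: v1.ProtocolTCP, HostIP: "127.0.0.1"},
			{ContainerPort: 80, HostPort: 8080, Protocol: v1.ProtocolTCP, HostIP: "::1"},
		},
	}
	// Both mappings survive: the generated names differ by address family.
	fmt.Println(len(kubecontainer.MakePortMappings(c))) // 2
}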
-func HasAnyActiveRegularContainerStarted(spec *v1.PodSpec, podStatus *PodStatus) bool { - if podStatus == nil { - return false - } - - containerNames := sets.New[string]() - for _, c := range spec.Containers { - containerNames.Insert(c.Name) - } - - for _, status := range podStatus.ActiveContainerStatuses { - if !containerNames.Has(status.Name) { - continue - } - return true - } - - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go deleted file mode 100644 index e0642a0f1cb..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package container - -import ( - "os" - "path/filepath" - "time" -) - -// OSInterface collects system level operations that need to be mocked out -// during tests. -type OSInterface interface { - MkdirAll(path string, perm os.FileMode) error - Symlink(oldname string, newname string) error - Stat(path string) (os.FileInfo, error) - Remove(path string) error - RemoveAll(path string) error - Create(path string) (*os.File, error) - Chmod(path string, perm os.FileMode) error - Hostname() (name string, err error) - Chtimes(path string, atime time.Time, mtime time.Time) error - Pipe() (r *os.File, w *os.File, err error) - ReadDir(dirname string) ([]os.DirEntry, error) - Glob(pattern string) ([]string, error) - Open(name string) (*os.File, error) - OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) - Rename(oldpath, newpath string) error -} - -// RealOS is used to dispatch the real system level operations. -type RealOS struct{} - -// MkdirAll will call os.MkdirAll to create a directory. -func (RealOS) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// Symlink will call os.Symlink to create a symbolic link. -func (RealOS) Symlink(oldname string, newname string) error { - return os.Symlink(oldname, newname) -} - -// Stat will call os.Stat to get the FileInfo for a given path -func (RealOS) Stat(path string) (os.FileInfo, error) { - return os.Stat(path) -} - -// Remove will call os.Remove to remove the path. -func (RealOS) Remove(path string) error { - return os.Remove(path) -} - -// RemoveAll will call os.RemoveAll to remove the path and its children. -func (RealOS) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -// Create will call os.Create to create and return a file -// at path. -func (RealOS) Create(path string) (*os.File, error) { - return os.Create(path) -} - -// Chmod will change the permissions on the specified path or return -// an error. -func (RealOS) Chmod(path string, perm os.FileMode) error { - return os.Chmod(path, perm) -} - -// Hostname will call os.Hostname to return the hostname. 
-func (RealOS) Hostname() (name string, err error) { - return os.Hostname() -} - -// Chtimes will call os.Chtimes to change the atime and mtime of the path -func (RealOS) Chtimes(path string, atime time.Time, mtime time.Time) error { - return os.Chtimes(path, atime, mtime) -} - -// Pipe will call os.Pipe to return a connected pair of pipe. -func (RealOS) Pipe() (r *os.File, w *os.File, err error) { - return os.Pipe() -} - -// ReadDir will call os.ReadDir to return the files under the directory. -func (RealOS) ReadDir(dirname string) ([]os.DirEntry, error) { - return os.ReadDir(dirname) -} - -// Glob will call filepath.Glob to return the names of all files matching -// pattern. -func (RealOS) Glob(pattern string) ([]string, error) { - return filepath.Glob(pattern) -} - -// Open will call os.Open to return the file. -func (RealOS) Open(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFile will call os.OpenFile to return the file. -func (RealOS) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// Rename will call os.Rename to rename a file. -func (RealOS) Rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go deleted file mode 100644 index 3d84a963e59..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package container - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - ref "k8s.io/client-go/tools/reference" - "k8s.io/kubernetes/pkg/api/legacyscheme" -) - -// ImplicitContainerPrefix is a container name prefix that will indicate that container was started implicitly (like the pod infra container). -var ImplicitContainerPrefix = "implicitly required container " - -// GenerateContainerRef returns an *v1.ObjectReference which references the given container -// within the given pod. Returns an error if the reference can't be constructed or the -// container doesn't actually belong to the pod. -func GenerateContainerRef(pod *v1.Pod, container *v1.Container) (*v1.ObjectReference, error) { - fieldPath, err := fieldPath(pod, container) - if err != nil { - // TODO: figure out intelligent way to refer to containers that we implicitly - // start (like the pod infra container). This is not a good way, ugh. - fieldPath = ImplicitContainerPrefix + container.Name - } - ref, err := ref.GetPartialReference(legacyscheme.Scheme, pod, fieldPath) - if err != nil { - return nil, err - } - return ref, nil -} - -// fieldPath returns a fieldPath locating container within pod. -// Returns an error if the container isn't part of the pod. 
-func fieldPath(pod *v1.Pod, container *v1.Container) (string, error) { - for i := range pod.Spec.Containers { - here := &pod.Spec.Containers[i] - if here.Name == container.Name { - if here.Name == "" { - return fmt.Sprintf("spec.containers[%d]", i), nil - } - return fmt.Sprintf("spec.containers{%s}", here.Name), nil - } - } - for i := range pod.Spec.InitContainers { - here := &pod.Spec.InitContainers[i] - if here.Name == container.Name { - if here.Name == "" { - return fmt.Sprintf("spec.initContainers[%d]", i), nil - } - return fmt.Sprintf("spec.initContainers{%s}", here.Name), nil - } - } - for i := range pod.Spec.EphemeralContainers { - here := &pod.Spec.EphemeralContainers[i] - if here.Name == container.Name { - if here.Name == "" { - return fmt.Sprintf("spec.ephemeralContainers[%d]", i), nil - } - return fmt.Sprintf("spec.ephemeralContainers{%s}", here.Name), nil - } - } - return "", fmt.Errorf("container %q not found in pod %s/%s", container.Name, pod.Namespace, pod.Name) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go deleted file mode 100644 index 02f81725f03..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go +++ /dev/null @@ -1,795 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//go:generate mockery -package container - -import ( - "context" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/remotecommand" - "k8s.io/client-go/util/flowcontrol" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/credentialprovider" - kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" - "k8s.io/kubernetes/pkg/volume" -) - -// Version interface allow to consume the runtime versions - compare and format to string. -type Version interface { - // Compare compares two versions of the runtime. On success it returns -1 - // if the version is less than the other, 1 if it is greater than the other, - // or 0 if they are equal. - Compare(other string) (int, error) - // String returns a string that represents the version. - String() string -} - -// ImageSpec is an internal representation of an image. Currently, it wraps the -// value of a Container's Image field, but in the future it will include more detailed -// information about the different image types. -type ImageSpec struct { - // ID of the image. - Image string - // Runtime handler used to pull this image - RuntimeHandler string - // The annotations for the image. - // This should be passed to CRI during image pulls and returned when images are listed. - Annotations []Annotation -} - -// ImageStats contains statistics about all the images currently available. -type ImageStats struct { - // Total amount of storage consumed by existing images. 
- TotalStorageBytes uint64 -} - -// Runtime interface defines the interfaces that should be implemented -// by a container runtime. -// Thread safety is required from implementations of this interface. -type Runtime interface { - // Type returns the type of the container runtime. - Type() string - - // Version returns the version information of the container runtime. - Version(ctx context.Context) (Version, error) - - // APIVersion returns the cached API version information of the container - // runtime. Implementation is expected to update this cache periodically. - // This may be different from the runtime engine's version. - // TODO(random-liu): We should fold this into Version() - APIVersion() (Version, error) - // Status returns the status of the runtime. An error is returned if the Status - // function itself fails, nil otherwise. - Status(ctx context.Context) (*RuntimeStatus, error) - // GetPods returns a list of containers grouped by pods. The boolean parameter - // specifies whether the runtime returns all containers including those already - // exited and dead containers (used for garbage collection). - GetPods(ctx context.Context, all bool) ([]*Pod, error) - // GarbageCollect removes dead containers using the specified container gc policy - // If allSourcesReady is not true, it means that kubelet doesn't have the - // complete list of pods from all available sources (e.g., apiserver, http, - // file). In this case, garbage collector should refrain itself from aggressive - // behavior such as removing all containers of unrecognized pods (yet). - // If evictNonDeletedPods is set to true, containers and sandboxes belonging to pods - // that are terminated, but not deleted will be evicted. Otherwise, only deleted pods - // will be GC'd. - // TODO: Revisit this method and make it cleaner. - GarbageCollect(ctx context.Context, gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error - // SyncPod syncs the running pod into the desired pod. - SyncPod(ctx context.Context, pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult - // KillPod kills all the containers of a pod. Pod may be nil, running pod must not be. - // TODO(random-liu): Return PodSyncResult in KillPod. - // gracePeriodOverride if specified allows the caller to override the pod default grace period. - // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. - // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. - KillPod(ctx context.Context, pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error - // GetPodStatus retrieves the status of the pod, including the - // information of all containers in the pod that are visible in Runtime. - GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*PodStatus, error) - // TODO(vmarmol): Unify pod and containerID args. - // GetContainerLogs returns logs of a specific container. By - // default, it returns a snapshot of the container log. Set 'follow' to true to - // stream the log. Set 'follow' to false and specify the number of lines (e.g. - // "100" or "all") to tail the log. - GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) - // DeleteContainer deletes a container. If the container is still running, an error is returned. 
-	DeleteContainer(ctx context.Context, containerID ContainerID) error
-	// ImageService provides image-related methods.
-	ImageService
-	// UpdatePodCIDR sends a new podCIDR to the runtime.
-	// This method just proxies a new runtimeConfig with the updated
-	// CIDR value down to the runtime shim.
-	UpdatePodCIDR(ctx context.Context, podCIDR string) error
-	// CheckpointContainer tells the runtime to checkpoint a container
-	// and store the resulting archive to the checkpoint directory.
-	CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error
-	// Generate pod status from the CRI event
-	GeneratePodStatus(event *runtimeapi.ContainerEventResponse) (*PodStatus, error)
-	// ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics.
-	// This list should be static at startup: either the client and server restart together when
-	// adding or removing metrics descriptors, or they should not change.
-	// Put differently, if ListPodSandboxMetrics references a name that is not described in the initial
-	// ListMetricDescriptors call, then the metric will not be broadcasted.
-	ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error)
-	// ListPodSandboxMetrics retrieves the metrics for all pod sandboxes.
-	ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error)
-	// GetContainerStatus returns the status for the container.
-	GetContainerStatus(ctx context.Context, id ContainerID) (*Status, error)
-	// GetContainerSwapBehavior reports whether a container could be swappable.
-	// This is used to decide whether to handle InPlacePodVerticalScaling for containers.
-	GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) kubelettypes.SwapBehavior
-}
-
-// StreamingRuntime is the interface implemented by runtimes that handle the serving of the
-// streaming calls (exec/attach/port-forward) themselves. In this case, Kubelet should redirect to
-// the runtime server.
-type StreamingRuntime interface {
-	GetExec(ctx context.Context, id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error)
-	GetAttach(ctx context.Context, id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error)
-	GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error)
-}
-
-// ImageService interface allows working with the image service.
-type ImageService interface {
-	// PullImage pulls an image from the network to local storage using the supplied
-	// secrets if necessary.
-	// It returns a reference (digest or ID) to the pulled image and the credentials
-	// that were used to pull the image. If the returned credentials are nil, the
-	// pull was anonymous.
-	PullImage(ctx context.Context, image ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error)
-	// GetImageRef gets the reference (digest or ID) of the image which has already been in
-	// the local storage. It returns ("", nil) if the image isn't in the local storage.
-	GetImageRef(ctx context.Context, image ImageSpec) (string, error)
-	// ListImages gets all images currently on the machine.
-	ListImages(ctx context.Context) ([]Image, error)
-	// RemoveImage removes the specified image.
-	RemoveImage(ctx context.Context, image ImageSpec) error
-	// ImageStats returns Image statistics.
-	ImageStats(ctx context.Context) (*ImageStats, error)
-	// ImageFsInfo returns a list of file systems for containers/images
-	ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error)
-	// GetImageSize returns the size of the image
-	GetImageSize(ctx context.Context, image ImageSpec) (uint64, error)
-}
-
-// Attacher interface allows attaching to a container.
-type Attacher interface {
-	AttachContainer(ctx context.Context, id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
-}
-
-// CommandRunner interface allows running a command in a container.
-type CommandRunner interface {
-	// RunInContainer synchronously executes the command in the container, and returns the output.
-	// If the command completes with a non-0 exit code, a k8s.io/utils/exec.ExitError will be returned.
-	RunInContainer(ctx context.Context, id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
-}
-
-// Pod is a group of containers.
-type Pod struct {
-	// The ID of the pod, which can be used to retrieve a particular pod
-	// from the pod list returned by GetPods().
-	ID types.UID
-	// The name and namespace of the pod, which are human-readable.
-	Name      string
-	Namespace string
-	// Creation timestamps of the Pod in nanoseconds.
-	CreatedAt uint64
-	// List of containers that belongs to this pod. It may contain only
-	// running containers, or mixed with dead ones (when GetPods(true)).
-	Containers []*Container
-	// List of sandboxes associated with this pod. The sandboxes are converted
-	// to Container temporarily to avoid substantial changes to other
-	// components. This is only populated by kuberuntime.
-	// TODO: use the runtimeApi.PodSandbox type directly.
-	Sandboxes []*Container
-}
-
-// PodPair contains both runtime#Pod and api#Pod
-type PodPair struct {
-	// APIPod is the v1.Pod
-	APIPod *v1.Pod
-	// RunningPod is the pod defined in pkg/kubelet/container/runtime#Pod
-	RunningPod *Pod
-}
-
-// ContainerID is a type that identifies a container.
-type ContainerID struct {
-	// The type of the container runtime. e.g. 'docker'.
-	Type string
-	// The identification of the container, this is consumable by
-	// the underlying container runtime. (Note that the container
-	// runtime interface still takes the whole struct as input).
-	ID string
-}
-
-// BuildContainerID returns the ContainerID given type and id.
-func BuildContainerID(typ, ID string) ContainerID {
-	return ContainerID{Type: typ, ID: ID}
-}
-
-// ParseContainerID is a convenience method for creating a ContainerID from an ID string.
-func ParseContainerID(containerID string) ContainerID {
-	var id ContainerID
-	if err := id.ParseString(containerID); err != nil {
-		klog.ErrorS(err, "Parsing containerID failed")
-	}
-	return id
-}
-
-// ParseString converts given string into ContainerID
-func (c *ContainerID) ParseString(data string) error {
-	// Trim the quotes and split the type and ID.
-	parts := strings.Split(strings.Trim(data, "\""), "://")
-	if len(parts) != 2 {
-		return fmt.Errorf("invalid container ID: %q", data)
-	}
-	c.Type, c.ID = parts[0], parts[1]
-	return nil
-}
-
-func (c *ContainerID) String() string {
-	return fmt.Sprintf("%s://%s", c.Type, c.ID)
-}
-
-// IsEmpty returns whether given ContainerID is empty.
-func (c *ContainerID) IsEmpty() bool {
-	return *c == ContainerID{}
-}
-
-// MarshalJSON formats a given ContainerID into a byte array.
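ContainerID round-trips through the type://ID form shown above. A small sketch (the import alias is an assumption):

package main

import (
	"fmt"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	var id kubecontainer.ContainerID
	if err := id.ParseString("containerd://0123abcd"); err != nil {
		panic(err)
	}
	fmt.Println(id.Type, id.ID) // containerd 0123abcd
	fmt.Println(id.String())    // containerd://0123abcd
}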
-func (c *ContainerID) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf("%q", c.String())), nil
-}
-
-// UnmarshalJSON parses ContainerID from a given array of bytes.
-func (c *ContainerID) UnmarshalJSON(data []byte) error {
-	return c.ParseString(string(data))
-}
-
-// State represents the state of a container
-type State string
-
-const (
-	// ContainerStateCreated indicates a container that has been created (e.g. with docker create) but not started.
-	ContainerStateCreated State = "created"
-	// ContainerStateRunning indicates a currently running container.
-	ContainerStateRunning State = "running"
-	// ContainerStateExited indicates a container that ran and completed ("stopped" in other contexts, although a created container is technically also "stopped").
-	ContainerStateExited State = "exited"
-	// ContainerStateUnknown encompasses all the states that we currently don't care about (like restarting, paused, dead).
-	ContainerStateUnknown State = "unknown"
-)
-
-// ContainerReasonStatusUnknown indicates that the status of the container cannot be determined.
-const ContainerReasonStatusUnknown string = "ContainerStatusUnknown"
-
-// Container provides the runtime information for a container, such as ID, hash,
-// state of the container.
-type Container struct {
-	// The ID of the container, used by the container runtime to identify
-	// a container.
-	ID ContainerID
-	// The name of the container, which should be the same as specified by
-	// v1.Container.
-	Name string
-	// The image name of the container, this also includes the tag of the image,
-	// the expected form is "NAME:TAG".
-	Image string
-	// The id of the image used by the container.
-	ImageID string
-	// The digested reference of the image used by the container.
-	ImageRef string
-	// Runtime handler used to pull the image if any.
-	ImageRuntimeHandler string
-	// Hash of the container, used for comparison. Optional for containers
-	// not managed by kubelet.
-	Hash uint64
-	// State is the state of the container.
-	State State
-}
-
-// PodStatus represents the status of the pod and its containers.
-// v1.PodStatus can be derived from examining PodStatus and v1.Pod.
-type PodStatus struct {
-	// ID of the pod.
-	ID types.UID
-	// Name of the pod.
-	Name string
-	// Namespace of the pod.
-	Namespace string
-	// All IPs assigned to this pod
-	IPs []string
-	// Status of containers in the pod.
-	ContainerStatuses []*Status
-	// Statuses of containers of the active sandbox in the pod.
-	ActiveContainerStatuses []*Status
-	// Status of the pod sandbox.
-	// Only for kuberuntime now, other runtime may keep it nil.
-	SandboxStatuses []*runtimeapi.PodSandboxStatus
-	// Timestamp at which container and pod statuses were recorded
-	TimeStamp time.Time
-}
-
-// ContainerResources represents the Resources allocated to the running container.
-type ContainerResources struct {
-	// CPU capacity reserved for the container
-	CPURequest *resource.Quantity
-	// CPU limit enforced on the container
-	CPULimit *resource.Quantity
-	// Memory capacity reserved for the container
-	MemoryRequest *resource.Quantity
-	// Memory limit enforced on the container
-	MemoryLimit *resource.Quantity
-}
-
-// Status represents the status of a container.
-//
-// Status does not contain VolumeMap because CRI API is unaware of volume names.
-type Status struct {
-	// ID of the container.
-	ID ContainerID
-	// Name of the container.
-	Name string
-	// Status of the container.
-	State State
-	// Creation time of the container.
- CreatedAt time.Time - // Start time of the container. - StartedAt time.Time - // Finish time of the container. - FinishedAt time.Time - // Exit code of the container. - ExitCode int - // Name of the image, this also includes the tag of the image, - // the expected form is "NAME:TAG". - Image string - // ID of the image. - ImageID string - // The digested reference of the image used by the container. - ImageRef string - // Runtime handler used to pull the image if any. - ImageRuntimeHandler string - // Hash of the container, used for comparison. - Hash uint64 - // Number of times that the container has been restarted. - RestartCount int - // A string explains why container is in such a status. - Reason string - // Message written by the container before exiting (stored in - // TerminationMessagePath). - Message string - // CPU and memory resources for this container - Resources *ContainerResources - // User identity information of the first process of this container - User *ContainerUser - // Mounts are the volume mounts of the container - Mounts []Mount - // StopSignal is used to show the container's effective stop signal in the Status - StopSignal *v1.Signal -} - -// ContainerUser represents user identity information -type ContainerUser struct { - // Linux holds user identity information of the first process of the containers in Linux. - // Note that this field cannot be set when spec.os.name is windows. - Linux *LinuxContainerUser - - // Windows holds user identity information of the first process of the containers in Windows - // This is just reserved for future use. - // Windows *WindowsContainerUser -} - -// LinuxContainerUser represents user identity information in Linux containers -type LinuxContainerUser struct { - // UID is the primary uid of the first process in the container - UID int64 - // GID is the primary gid of the first process in the container - GID int64 - // SupplementalGroups are the supplemental groups attached to the first process in the container - SupplementalGroups []int64 -} - -// FindContainerStatusByName returns container status in the pod status with the given name. -// When there are multiple containers' statuses with the same name, the first match will be returned. -func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *Status { - for _, containerStatus := range podStatus.ContainerStatuses { - if containerStatus.Name == containerName { - return containerStatus - } - } - return nil -} - -// GetRunningContainerStatuses returns container status of all the running containers in a pod -func (podStatus *PodStatus) GetRunningContainerStatuses() []*Status { - runningContainerStatuses := []*Status{} - for _, containerStatus := range podStatus.ContainerStatuses { - if containerStatus.State == ContainerStateRunning { - runningContainerStatuses = append(runningContainerStatuses, containerStatus) - } - } - return runningContainerStatuses -} - -// Image contains basic information about a container image. -type Image struct { - // ID of the image. - ID string - // Other names by which this image is known. - RepoTags []string - // Digests by which this image is known. - RepoDigests []string - // The size of the image in bytes. - Size int64 - // ImageSpec for the image which include annotations. - Spec ImageSpec - // Pin for preventing garbage collection - Pinned bool -} - -// EnvVar represents the environment variable. -type EnvVar struct { - Name string - Value string -} - -// Annotation represents an annotation. 
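FindContainerStatusByName and GetRunningContainerStatuses above are the usual entry points into a PodStatus. A hedged sketch (the import alias and values are assumptions):

package main

import (
	"fmt"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	ps := &kubecontainer.PodStatus{
		Name: "demo",
		ContainerStatuses: []*kubecontainer.Status{
			{Name: "app", State: kubecontainer.ContainerStateRunning},
			{Name: "sidecar", State: kubecontainer.ContainerStateExited},
		},
	}
	if s := ps.FindContainerStatusByName("app"); s != nil {
		fmt.Println(s.State) // running
	}
	fmt.Println(len(ps.GetRunningContainerStatuses())) // 1
}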
-type Annotation struct { - Name string - Value string -} - -// Mount represents a volume mount. -type Mount struct { - // Name of the volume mount. - // TODO(yifan): Remove this field, as this is not representing the unique name of the mount, - // but the volume name only. - Name string - // Path of the mount within the container. - ContainerPath string - // Path of the mount on the host. - HostPath string - // Whether the mount is read-only. - ReadOnly bool - // Whether the mount is recursive read-only. - // Must not be true if ReadOnly is false. - RecursiveReadOnly bool - // Whether the mount needs SELinux relabeling - SELinuxRelabel bool - // Requested propagation mode - Propagation runtimeapi.MountPropagation - // Image is set if an OCI volume as image ID or digest should get mounted (special case). - Image *runtimeapi.ImageSpec - // ImageSubPath is set if an image volume sub path should get mounted. This - // field is only required if the above Image is set. - ImageSubPath string -} - -// ImageVolumes is a map of image specs by volume name. -type ImageVolumes = map[string]*runtimeapi.ImageSpec - -// PortMapping contains information about the port mapping. -type PortMapping struct { - // Protocol of the port mapping. - Protocol v1.Protocol - // The port number within the container. - ContainerPort int - // The port number on the host. - HostPort int - // The host IP. - HostIP string -} - -// DeviceInfo contains information about the device. -type DeviceInfo struct { - // Path on host for mapping - PathOnHost string - // Path in Container to map - PathInContainer string - // Cgroup permissions - Permissions string -} - -// CDIDevice contains information about CDI device -type CDIDevice struct { - // Name is a fully qualified device name according to - // https://github.com/cncf-tags/container-device-interface/blob/e66544063aa7760c4ea6330ce9e6c757f8e61df2/README.md?plain=1#L9-L15 - Name string -} - -// RunContainerOptions specify the options which are necessary for running containers -type RunContainerOptions struct { - // The environment variables list. - Envs []EnvVar - // The mounts for the containers. - Mounts []Mount - // The host devices mapped into the containers. - Devices []DeviceInfo - // The CDI devices for the container - CDIDevices []CDIDevice - // The annotations for the container - // These annotations are generated by other components (i.e., - // not users). Currently, only device plugins populate the annotations. - Annotations []Annotation - // If the container has specified the TerminationMessagePath, then - // this directory will be used to create and mount the log file to - // container.TerminationMessagePath - PodContainerDir string - // The type of container rootfs - ReadOnly bool -} - -// VolumeInfo contains information about the volume. -type VolumeInfo struct { - // Mounter is the volume's mounter - Mounter volume.Mounter - // BlockVolumeMapper is the Block volume's mapper - BlockVolumeMapper volume.BlockVolumeMapper - // SELinuxLabeled indicates whether this volume has had the - // pod's SELinux label applied to it or not - SELinuxLabeled bool - // Whether the volume permission is set to read-only or not - // This value is passed from volume.spec - ReadOnly bool - // Inner volume spec name, which is the PV name if used, otherwise - // it is the same as the outer volume spec name. - InnerVolumeSpecName string -} - -// VolumeMap represents the map of volumes. 
-type VolumeMap map[string]VolumeInfo - -// RuntimeConditionType is the types of required runtime conditions. -type RuntimeConditionType string - -const ( - // RuntimeReady means the runtime is up and ready to accept basic containers. - RuntimeReady RuntimeConditionType = "RuntimeReady" - // NetworkReady means the runtime network is up and ready to accept containers which require network. - NetworkReady RuntimeConditionType = "NetworkReady" -) - -// RuntimeStatus contains the status of the runtime. -type RuntimeStatus struct { - // Conditions is an array of current observed runtime conditions. - Conditions []RuntimeCondition - // Handlers is an array of current available handlers - Handlers []RuntimeHandler - // Features is the set of features implemented by the runtime - Features *RuntimeFeatures -} - -// GetRuntimeCondition gets a specified runtime condition from the runtime status. -func (r *RuntimeStatus) GetRuntimeCondition(t RuntimeConditionType) *RuntimeCondition { - for i := range r.Conditions { - c := &r.Conditions[i] - if c.Type == t { - return c - } - } - return nil -} - -// String formats the runtime status into human readable string. -func (r *RuntimeStatus) String() string { - var ss []string - var sh []string - for _, c := range r.Conditions { - ss = append(ss, c.String()) - } - for _, h := range r.Handlers { - sh = append(sh, h.String()) - } - return fmt.Sprintf("Runtime Conditions: %s; Handlers: %s, Features: %s", strings.Join(ss, ", "), strings.Join(sh, ", "), r.Features.String()) -} - -// RuntimeHandler contains condition information for the runtime handler. -type RuntimeHandler struct { - // Name is the handler name. - Name string - // SupportsRecursiveReadOnlyMounts is true if the handler has support for - // recursive read-only mounts. - SupportsRecursiveReadOnlyMounts bool - // SupportsUserNamespaces is true if the handler has support for - // user namespaces. - SupportsUserNamespaces bool -} - -// String formats the runtime handler into human readable string. -func (h *RuntimeHandler) String() string { - return fmt.Sprintf("Name=%s SupportsRecursiveReadOnlyMounts: %v SupportsUserNamespaces: %v", - h.Name, h.SupportsRecursiveReadOnlyMounts, h.SupportsUserNamespaces) -} - -// RuntimeCondition contains condition information for the runtime. -type RuntimeCondition struct { - // Type of runtime condition. - Type RuntimeConditionType - // Status of the condition, one of true/false. - Status bool - // Reason is brief reason for the condition's last transition. - Reason string - // Message is human readable message indicating details about last transition. - Message string -} - -// String formats the runtime condition into human readable string. -func (c *RuntimeCondition) String() string { - return fmt.Sprintf("%s=%t reason:%s message:%s", c.Type, c.Status, c.Reason, c.Message) -} - -// RuntimeFeatures contains the set of features implemented by the runtime -type RuntimeFeatures struct { - SupplementalGroupsPolicy bool -} - -// String formats the runtime condition into a human readable string. -func (f *RuntimeFeatures) String() string { - if f == nil { - return "nil" - } - return fmt.Sprintf("SupplementalGroupsPolicy: %v", f.SupplementalGroupsPolicy) -} - -// Pods represents the list of pods -type Pods []*Pod - -// FindPodByID finds and returns a pod in the pod list by UID. It will return an empty pod -// if not found. 
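GetRuntimeCondition above is how callers probe the RuntimeReady and NetworkReady conditions. An illustrative sketch (the import alias and reason string are assumptions):

package main

import (
	"fmt"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	rs := &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: false, Reason: "CNIDown"},
		},
	}
	if c := rs.GetRuntimeCondition(kubecontainer.NetworkReady); c != nil && !c.Status {
		fmt.Println("network not ready:", c.Reason)
	}
}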
-func (p Pods) FindPodByID(podUID types.UID) Pod {
-	for i := range p {
-		if p[i].ID == podUID {
-			return *p[i]
-		}
-	}
-	return Pod{}
-}
-
-// FindPodByFullName finds and returns a pod in the pod list by the full name.
-// It will return an empty pod if not found.
-func (p Pods) FindPodByFullName(podFullName string) Pod {
-	for i := range p {
-		if BuildPodFullName(p[i].Name, p[i].Namespace) == podFullName {
-			return *p[i]
-		}
-	}
-	return Pod{}
-}
-
-// FindPod combines FindPodByID and FindPodByFullName, it finds and returns a pod in the
-// pod list either by the full name or the pod ID. It will return an empty pod
-// if not found.
-func (p Pods) FindPod(podFullName string, podUID types.UID) Pod {
-	if len(podFullName) > 0 {
-		return p.FindPodByFullName(podFullName)
-	}
-	return p.FindPodByID(podUID)
-}
-
-// FindContainerByName returns a container in the pod with the given name.
-// When there are multiple containers with the same name, the first match will
-// be returned.
-func (p *Pod) FindContainerByName(containerName string) *Container {
-	for _, c := range p.Containers {
-		if c.Name == containerName {
-			return c
-		}
-	}
-	return nil
-}
-
-// FindContainerByID returns a container in the pod with the given ContainerID.
-func (p *Pod) FindContainerByID(id ContainerID) *Container {
-	for _, c := range p.Containers {
-		if c.ID == id {
-			return c
-		}
-	}
-	return nil
-}
-
-// FindSandboxByID returns a sandbox in the pod with the given ContainerID.
-func (p *Pod) FindSandboxByID(id ContainerID) *Container {
-	for _, c := range p.Sandboxes {
-		if c.ID == id {
-			return c
-		}
-	}
-	return nil
-}
-
-// ToAPIPod converts Pod to v1.Pod. Note that if a field in v1.Pod has no
-// corresponding field in Pod, the field would not be populated.
-func (p *Pod) ToAPIPod() *v1.Pod {
-	var pod v1.Pod
-	pod.UID = p.ID
-	pod.Name = p.Name
-	pod.Namespace = p.Namespace
-
-	for _, c := range p.Containers {
-		var container v1.Container
-		container.Name = c.Name
-		container.Image = c.Image
-		pod.Spec.Containers = append(pod.Spec.Containers, container)
-	}
-	return &pod
-}
-
-// IsEmpty returns true if the pod is empty.
-func (p *Pod) IsEmpty() bool {
-	return reflect.DeepEqual(p, &Pod{})
-}
-
-// GetPodFullName returns a name that uniquely identifies a pod.
-func GetPodFullName(pod *v1.Pod) string {
-	// Use underscore as the delimiter because it is not allowed in pod name
-	// (DNS subdomain format), while allowed in the container name format.
-	return pod.Name + "_" + pod.Namespace
-}
-
-// BuildPodFullName builds the pod full name from pod name and namespace.
-func BuildPodFullName(name, namespace string) string {
-	return name + "_" + namespace
-}
-
-// ParsePodFullName parses the pod full name.
-func ParsePodFullName(podFullName string) (string, string, error) {
-	parts := strings.Split(podFullName, "_")
-	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
-		return "", "", fmt.Errorf("failed to parse the pod full name %q", podFullName)
-	}
-	return parts[0], parts[1], nil
-}
-
-// Option is a functional option type for Runtime, useful for
-// completely optional settings.
-type Option func(Runtime)
-
-// SortContainerStatusesByCreationTime sorts the container statuses by creation time.
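The full-name helpers above rely on "_" being illegal in pod names, so the round trip is unambiguous. A quick sketch (the import alias is an assumption):

package main

import (
	"fmt"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	full := kubecontainer.BuildPodFullName("web-0", "prod") // "web-0_prod"
	name, ns, err := kubecontainer.ParsePodFullName(full)
	fmt.Println(name, ns, err) // web-0 prod <nil>
}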
-type SortContainerStatusesByCreationTime []*Status
-
-func (s SortContainerStatusesByCreationTime) Len() int      { return len(s) }
-func (s SortContainerStatusesByCreationTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s SortContainerStatusesByCreationTime) Less(i, j int) bool {
-	return s[i].CreatedAt.Before(s[j].CreatedAt)
-}
-
-const (
-	// MaxPodTerminationMessageLogLength is the maximum bytes any one pod may have written
-	// as termination message output across all containers. Containers will be evenly truncated
-	// until output is below this limit.
-	MaxPodTerminationMessageLogLength = 1024 * 12
-	// MaxContainerTerminationMessageLength is the upper bound any one container may write to
-	// its termination message path. Contents above this length will be truncated.
-	MaxContainerTerminationMessageLength = 1024 * 4
-	// MaxContainerTerminationMessageLogLength is the maximum bytes any one container will
-	// have written to its termination message when the message is read from the logs.
-	MaxContainerTerminationMessageLogLength = 1024 * 2
-	// MaxContainerTerminationMessageLogLines is the maximum number of previous lines of
-	// log output that the termination message can contain.
-	MaxContainerTerminationMessageLogLines = 80
-)
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache.go
deleted file mode 100644
index ea4b155e0d0..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-//go:generate mockery
-package container

-import (
-	"context"
-	"sync"
-	"time"
-)
-
-// RuntimeCache is an interface for obtaining cached Pods.
-type RuntimeCache interface {
-	GetPods(context.Context) ([]*Pod, error)
-	ForceUpdateIfOlder(context.Context, time.Time) error
-}
-
-type podsGetter interface {
-	GetPods(context.Context, bool) ([]*Pod, error)
-}
-
-// NewRuntimeCache creates a container runtime cache.
-func NewRuntimeCache(getter podsGetter, cachePeriod time.Duration) (RuntimeCache, error) {
-	return &runtimeCache{
-		getter:      getter,
-		cachePeriod: cachePeriod,
-	}, nil
-}
-
-// runtimeCache caches a list of pods. It records a timestamp (cacheTime) right
-// before updating the pods, so the timestamp is at most as new as the pods
-// (and can be slightly older). The timestamp always moves forward. Callers are
-// expected not to modify the pods returned from GetPods.
-type runtimeCache struct {
-	sync.Mutex
-	// The underlying container runtime used to update the cache.
-	getter podsGetter
-	// The interval after which the cache should be refreshed.
-	cachePeriod time.Duration
-	// Last time when cache was updated.
-	cacheTime time.Time
-	// The content of the cache.
-	pods []*Pod
-}
-
-// GetPods returns the cached pods if they are not outdated; otherwise, it
-// retrieves the latest pods and returns them.
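The cache behavior described above (serve from cache within cachePeriod, refresh otherwise) can be observed with a stub getter; the staticGetter type below is an assumption for illustration, not part of the removed code:

package main

import (
	"context"
	"fmt"
	"time"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// staticGetter is a stand-in pods getter that counts how often it is hit.
type staticGetter struct{ calls int }

func (g *staticGetter) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) {
	g.calls++
	return []*kubecontainer.Pod{{Name: "demo"}}, nil
}

func main() {
	g := &staticGetter{}
	cache, _ := kubecontainer.NewRuntimeCache(g, 2*time.Second)
	_, _ = cache.GetPods(context.Background()) // first call populates the cache
	_, _ = cache.GetPods(context.Background()) // served from cache within the period
	fmt.Println(g.calls) // 1
}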
-func (r *runtimeCache) GetPods(ctx context.Context) ([]*Pod, error) { - r.Lock() - defer r.Unlock() - if time.Since(r.cacheTime) > r.cachePeriod { - if err := r.updateCache(ctx); err != nil { - return nil, err - } - } - return r.pods, nil -} - -func (r *runtimeCache) ForceUpdateIfOlder(ctx context.Context, minExpectedCacheTime time.Time) error { - r.Lock() - defer r.Unlock() - if r.cacheTime.Before(minExpectedCacheTime) { - return r.updateCache(ctx) - } - return nil -} - -func (r *runtimeCache) updateCache(ctx context.Context) error { - pods, timestamp, err := r.getPodsWithTimestamp(ctx) - if err != nil { - return err - } - r.pods, r.cacheTime = pods, timestamp - return nil -} - -// getPodsWithTimestamp records a timestamp and retrieves pods from the getter. -func (r *runtimeCache) getPodsWithTimestamp(ctx context.Context) ([]*Pod, time.Time, error) { - // Always record the timestamp before getting the pods to avoid stale pods. - timestamp := time.Now() - pods, err := r.getter.GetPods(ctx, false) - return pods, timestamp, err -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_fake.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_fake.go deleted file mode 100644 index 4a09b3be923..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_fake.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package container - -import "context" - -// TestRuntimeCache embeds runtimeCache with some additional methods for testing. -// It must be declared in the container package to have visibility to runtimeCache. -// It cannot be in a "..._test.go" file in order for runtime_cache_test.go to have cross-package visibility to it. -// (cross-package declarations in test files cannot be used from dot imports if this package is vendored) -type TestRuntimeCache struct { - runtimeCache -} - -// UpdateCacheWithLock updates the cache with the lock. -func (r *TestRuntimeCache) UpdateCacheWithLock() error { - r.Lock() - defer r.Unlock() - return r.updateCache(context.Background()) -} - -// GetCachedPods returns the cached pods. -func (r *TestRuntimeCache) GetCachedPods() []*Pod { - r.Lock() - defer r.Unlock() - return r.pods -} - -// NewTestRuntimeCache creates a new instance of TestRuntimeCache. -func NewTestRuntimeCache(getter podsGetter) *TestRuntimeCache { - return &TestRuntimeCache{ - runtimeCache: runtimeCache{ - getter: getter, - }, - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go deleted file mode 100644 index b04456e5314..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package container
-
-import (
-	"errors"
-	"fmt"
-
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-)
-
-// TODO(random-liu): We need to better organize runtime errors for introspection.
-
-// ErrCrashLoopBackOff returned when a container Terminated and Kubelet is backing off the restart.
-var ErrCrashLoopBackOff = errors.New("CrashLoopBackOff")
-
-var (
-	// ErrContainerNotFound returned when a container in the given pod with the
-	// given container name was not found, amongst those managed by the kubelet.
-	ErrContainerNotFound = errors.New("no matching container")
-)
-
-var (
-	// ErrRunContainer returned when runtime failed to start any of the pod's containers.
-	ErrRunContainer = errors.New("RunContainerError")
-	// ErrKillContainer returned when runtime failed to kill any of the pod's containers.
-	ErrKillContainer = errors.New("KillContainerError")
-	// ErrCreatePodSandbox returned when runtime failed to create a sandbox for pod.
-	ErrCreatePodSandbox = errors.New("CreatePodSandboxError")
-	// ErrConfigPodSandbox returned when runtime failed to get pod sandbox config from pod.
-	ErrConfigPodSandbox = errors.New("ConfigPodSandboxError")
-	// ErrKillPodSandbox returned when runtime failed to stop pod's sandbox.
-	ErrKillPodSandbox = errors.New("KillPodSandboxError")
-	// ErrResizePodInPlace returned when runtime failed to resize a pod.
-	ErrResizePodInPlace = errors.New("ResizePodInPlaceError")
-)
-
-// SyncAction indicates different kinds of actions in SyncPod() and KillPod(). Now there are only actions
-// about start/kill container and setup/teardown network.
-type SyncAction string
-
-const (
-	// StartContainer action
-	StartContainer SyncAction = "StartContainer"
-	// KillContainer action
-	KillContainer SyncAction = "KillContainer"
-	// SetupNetwork action
-	SetupNetwork SyncAction = "SetupNetwork"
-	// TeardownNetwork action
-	TeardownNetwork SyncAction = "TeardownNetwork"
-	// InitContainer action
-	InitContainer SyncAction = "InitContainer"
-	// CreatePodSandbox action
-	CreatePodSandbox SyncAction = "CreatePodSandbox"
-	// ConfigPodSandbox action
-	ConfigPodSandbox SyncAction = "ConfigPodSandbox"
-	// KillPodSandbox action
-	KillPodSandbox SyncAction = "KillPodSandbox"
-	// ResizePodInPlace action is included whenever any containers in the pod are resized without restart
-	ResizePodInPlace SyncAction = "ResizePodInPlace"
-)
-
-// SyncResult is the result of sync action.
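// A minimal sketch of how SyncAction, SyncResult, and PodSyncResult compose,
// assuming only the definitions in this file: callers record one SyncResult
// per action, aggregate them, and read back a single summary error.

result := PodSyncResult{}

start := NewSyncResult(StartContainer, "app-container")
start.Fail(ErrRunContainer, "image pull backoff")
result.AddSyncResult(start)

network := NewSyncResult(SetupNetwork, "default/my-pod")
result.AddSyncResult(network) // never Fail()ed, so this action succeeded

if err := result.Error(); err != nil {
	// err aggregates every failed action, here the StartContainer failure.
	fmt.Println(err)
}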
-type SyncResult struct { - // The associated action of the result - Action SyncAction - // The target of the action, now the target can only be: - // * Container: Target should be container name - // * Network: Target is useless now, we just set it as pod full name now - Target interface{} - // Brief error reason - Error error - // Human readable error reason - Message string -} - -// NewSyncResult generates new SyncResult with specific Action and Target -func NewSyncResult(action SyncAction, target interface{}) *SyncResult { - return &SyncResult{Action: action, Target: target} -} - -// Fail fails the SyncResult with specific error and message -func (r *SyncResult) Fail(err error, msg string) { - r.Error, r.Message = err, msg -} - -// PodSyncResult is the summary result of SyncPod() and KillPod() -type PodSyncResult struct { - // Result of different sync actions - SyncResults []*SyncResult - // Error encountered in SyncPod() and KillPod() that is not already included in SyncResults - SyncError error -} - -// AddSyncResult adds multiple SyncResult to current PodSyncResult -func (p *PodSyncResult) AddSyncResult(result ...*SyncResult) { - p.SyncResults = append(p.SyncResults, result...) -} - -// AddPodSyncResult merges a PodSyncResult to current one -func (p *PodSyncResult) AddPodSyncResult(result PodSyncResult) { - p.AddSyncResult(result.SyncResults...) - p.SyncError = result.SyncError -} - -// Fail fails the PodSyncResult with an error occurred in SyncPod() and KillPod() itself -func (p *PodSyncResult) Fail(err error) { - p.SyncError = err -} - -// Error returns an error summarizing all the errors in PodSyncResult -func (p *PodSyncResult) Error() error { - errlist := []error{} - if p.SyncError != nil { - errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v", p.SyncError)) - } - for _, result := range p.SyncResults { - if result.Error != nil { - errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q", result.Action, result.Target, - result.Error, result.Message)) - } - } - return utilerrors.NewAggregate(errlist) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go index 526a67cdb2b..3b3d13705ff 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go @@ -36,6 +36,7 @@ const ( NetworkNotReady = "NetworkNotReady" ResizeDeferred = "ResizeDeferred" ResizeInfeasible = "ResizeInfeasible" + ResizeCompleted = "ResizeCompleted" ) // Image event reason list diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/types.go deleted file mode 100644 index 359f13e7bcc..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/types.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package api
-
-import (
-	"time"
-
-	"k8s.io/apimachinery/pkg/api/resource"
-)
-
-// Signal defines a signal that can trigger eviction of pods on a node.
-type Signal string
-
-const (
-	// SignalMemoryAvailable is memory available (i.e. capacity - workingSet), in bytes.
-	SignalMemoryAvailable Signal = "memory.available"
-	// SignalNodeFsAvailable is amount of storage available on filesystem that kubelet uses for volumes, daemon logs, etc.
-	SignalNodeFsAvailable Signal = "nodefs.available"
-	// SignalNodeFsInodesFree is amount of inodes available on filesystem that kubelet uses for volumes, daemon logs, etc.
-	SignalNodeFsInodesFree Signal = "nodefs.inodesFree"
-	// SignalImageFsAvailable is amount of storage available on filesystem that container runtime uses for storing image layers.
-	// If the container filesystem and image filesystem are not separate,
-	// then imagefs can store both image layers and writeable layers.
-	SignalImageFsAvailable Signal = "imagefs.available"
-	// SignalImageFsInodesFree is amount of inodes available on filesystem that container runtime uses for storing image layers.
-	// If the container filesystem and image filesystem are not separate,
-	// then imagefs can store both image layers and writeable layers.
-	SignalImageFsInodesFree Signal = "imagefs.inodesFree"
-	// SignalContainerFsAvailable is amount of storage available on filesystem that container runtime uses for container writable layers.
-	// In case of a single filesystem, containerfs=nodefs.
-	// In case of an image filesystem, containerfs=imagefs.
-	// We will override user settings and set to either imagefs or nodefs depending on configuration.
-	SignalContainerFsAvailable Signal = "containerfs.available"
-	// SignalContainerFsInodesFree is amount of inodes available on filesystem that container runtime uses for container writable layers.
-	// In case of a single filesystem, containerfs=nodefs.
-	// In case of an image filesystem, containerfs=imagefs.
-	// We will override user settings and set to either imagefs or nodefs depending on configuration.
-	SignalContainerFsInodesFree Signal = "containerfs.inodesFree"
-	// SignalAllocatableMemoryAvailable is amount of memory available for pod allocation (i.e. allocatable - workingSet (of pods)), in bytes.
-	SignalAllocatableMemoryAvailable Signal = "allocatableMemory.available"
-	// SignalPIDAvailable is amount of PIDs available for pod allocation.
-	SignalPIDAvailable Signal = "pid.available"
-)
-
-// ThresholdOperator is the operator used to express a Threshold.
-type ThresholdOperator string
-
-const (
-	// OpLessThan is the operator that expresses a less-than comparison.
-	OpLessThan ThresholdOperator = "LessThan"
-)
-
-// OpForSignal maps Signals to ThresholdOperators.
-// Today, the only supported operator is "LessThan". This may change in the future,
-// for example if "consumed" (as opposed to "available") type signals are added.
-// In both cases the directionality of the threshold is implicit to the signal type
-// (for a given signal, the decision to evict will be made when crossing the threshold
-// from either above or below, never both). There is thus no reason to expose the
-// operator in the Kubelet's public API. Instead, we internally map signal types to operators.
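// A short sketch of how these types fit together, assuming the Threshold and
// GetThresholdQuantity definitions that follow: a hard eviction threshold of
// "evict when memory.available drops below 10% of capacity" could be written as

hard := Threshold{
	Signal:   SignalMemoryAvailable,
	Operator: OpForSignal[SignalMemoryAvailable], // OpLessThan
	Value:    ThresholdValue{Percentage: 0.1},
}

capacity := resource.MustParse("16Gi")
// For percentage-based values, GetThresholdQuantity scales the capacity:
// 16Gi * 0.1 = 1717986918 bytes (~1.6Gi).
limit := GetThresholdQuantity(hard.Value, &capacity)
fmt.Printf("evict when %s < %s\n", hard.Signal, limit)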
-var OpForSignal = map[Signal]ThresholdOperator{ - SignalMemoryAvailable: OpLessThan, - SignalNodeFsAvailable: OpLessThan, - SignalNodeFsInodesFree: OpLessThan, - SignalImageFsAvailable: OpLessThan, - SignalImageFsInodesFree: OpLessThan, - SignalContainerFsAvailable: OpLessThan, - SignalContainerFsInodesFree: OpLessThan, - SignalAllocatableMemoryAvailable: OpLessThan, - SignalPIDAvailable: OpLessThan, -} - -// ThresholdValue is a value holder that abstracts literal versus percentage based quantity -type ThresholdValue struct { - // The following fields are exclusive. Only the topmost non-zero field is used. - - // Quantity is a quantity associated with the signal that is evaluated against the specified operator. - Quantity *resource.Quantity - // Percentage represents the usage percentage over the total resource that is evaluated against the specified operator. - Percentage float32 -} - -// Threshold defines a metric for when eviction should occur. -type Threshold struct { - // Signal defines the entity that was measured. - Signal Signal - // Operator represents a relationship of a signal to a value. - Operator ThresholdOperator - // Value is the threshold the resource is evaluated against. - Value ThresholdValue - // GracePeriod represents the amount of time that a threshold must be met before eviction is triggered. - GracePeriod time.Duration - // MinReclaim represents the minimum amount of resource to reclaim if the threshold is met. - MinReclaim *ThresholdValue -} - -// GetThresholdQuantity returns the expected quantity value for a thresholdValue -func GetThresholdQuantity(value ThresholdValue, capacity *resource.Quantity) *resource.Quantity { - if value.Quantity != nil { - res := value.Quantity.DeepCopy() - return &res - } - return resource.NewQuantity(int64(float64(capacity.Value())*float64(value.Percentage)), resource.BinarySI) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/util/util.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/util/util.go deleted file mode 100644 index e980fd1ba20..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/util/util.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - v1 "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/klog/v2" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" -) - -// PodSandboxChanged checks whether the spec of the pod is changed and returns -// (changed, new attempt, original sandboxID if exist). -func PodSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) { - if len(podStatus.SandboxStatuses) == 0 { - klog.V(2).InfoS("No sandbox for pod can be found. 
Need to start a new one", "pod", klog.KObj(pod)) - return true, 0, "" - } - - readySandboxCount := 0 - for _, s := range podStatus.SandboxStatuses { - if s.State == runtimeapi.PodSandboxState_SANDBOX_READY { - readySandboxCount++ - } - } - - // Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one. - sandboxStatus := podStatus.SandboxStatuses[0] - if readySandboxCount > 1 { - klog.V(2).InfoS("Multiple sandboxes are ready for Pod. Need to reconcile them", "pod", klog.KObj(pod)) - return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id - } - if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY { - klog.V(2).InfoS("No ready sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod)) - return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id - } - - // Needs to create a new sandbox when network namespace changed. - if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != NetworkNamespaceForPod(pod) { - klog.V(2).InfoS("Sandbox for pod has changed. Need to start a new one", "pod", klog.KObj(pod)) - return true, sandboxStatus.Metadata.Attempt + 1, "" - } - - // Needs to create a new sandbox when the sandbox does not have an IP address. - if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network != nil && sandboxStatus.Network.Ip == "" { - klog.V(2).InfoS("Sandbox for pod has no IP address. Need to start a new one", "pod", klog.KObj(pod)) - return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id - } - - return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id -} - -// IpcNamespaceForPod returns the runtimeapi.NamespaceMode -// for the IPC namespace of a pod -func IpcNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode { - if pod != nil && pod.Spec.HostIPC { - return runtimeapi.NamespaceMode_NODE - } - return runtimeapi.NamespaceMode_POD -} - -// NetworkNamespaceForPod returns the runtimeapi.NamespaceMode -// for the network namespace of a pod -func NetworkNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode { - if pod != nil && pod.Spec.HostNetwork { - return runtimeapi.NamespaceMode_NODE - } - return runtimeapi.NamespaceMode_POD -} - -// PidNamespaceForPod returns the runtimeapi.NamespaceMode -// for the PID namespace of a pod -func PidNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode { - if pod != nil { - if pod.Spec.HostPID { - return runtimeapi.NamespaceMode_NODE - } - if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace { - return runtimeapi.NamespaceMode_POD - } - } - // Note that PID does not default to the zero value for v1.Pod - return runtimeapi.NamespaceMode_CONTAINER -} - -// LookupRuntimeHandler is implemented by *runtimeclass.Manager. -type RuntimeHandlerResolver interface { - LookupRuntimeHandler(runtimeClassName *string) (string, error) -} - -// namespacesForPod returns the runtimeapi.NamespaceOption for a given pod. -// An empty or nil pod can be used to get the namespace defaults for v1.Pod. 
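// A quick sketch of how the helpers above map pod spec flags to CRI namespace
// modes, assuming k8s.io/api/core/v1 as imported in this file:

shareProcess := true
pod := &v1.Pod{Spec: v1.PodSpec{
	HostNetwork:           true,
	ShareProcessNamespace: &shareProcess,
}}

fmt.Println(NetworkNamespaceForPod(pod)) // NODE (HostNetwork is set)
fmt.Println(IpcNamespaceForPod(pod))     // POD  (HostIPC defaults to false)
fmt.Println(PidNamespaceForPod(pod))     // POD  (ShareProcessNamespace is true)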
-func NamespacesForPod(pod *v1.Pod, runtimeHelper kubecontainer.RuntimeHelper, rcManager RuntimeHandlerResolver) (*runtimeapi.NamespaceOption, error) { - runtimeHandler := "" - if pod != nil && rcManager != nil { - var err error - runtimeHandler, err = rcManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName) - if err != nil { - return nil, err - } - } - userNs, err := runtimeHelper.GetOrCreateUserNamespaceMappings(pod, runtimeHandler) - if err != nil { - return nil, err - } - - return &runtimeapi.NamespaceOption{ - Ipc: IpcNamespaceForPod(pod), - Network: NetworkNamespaceForPod(pod), - Pid: PidNamespaceForPod(pod), - UsernsOptions: userNs, - }, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/admission_failure_handler_stub.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/admission_failure_handler_stub.go deleted file mode 100644 index d22df740955..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/admission_failure_handler_stub.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lifecycle - -import ( - "k8s.io/api/core/v1" -) - -// AdmissionFailureHandlerStub is an AdmissionFailureHandler that does not perform any handling of admission failure. -// It simply passes the failure on. -type AdmissionFailureHandlerStub struct{} - -var _ AdmissionFailureHandler = &AdmissionFailureHandlerStub{} - -// NewAdmissionFailureHandlerStub returns an instance of AdmissionFailureHandlerStub. -func NewAdmissionFailureHandlerStub() *AdmissionFailureHandlerStub { - return &AdmissionFailureHandlerStub{} -} - -// HandleAdmissionFailure simply passes admission rejection on, with no special handling. -func (n *AdmissionFailureHandlerStub) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []PredicateFailureReason) ([]PredicateFailureReason, error) { - return failureReasons, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go deleted file mode 100644 index 0c1dd9a1cc7..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lifecycle contains handlers for pod lifecycle events and interfaces -// to integrate with kubelet admission, synchronization, and eviction of pods. 
-package lifecycle diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go deleted file mode 100644 index b0c22a78db2..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lifecycle - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/metrics" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - "k8s.io/kubernetes/pkg/kubelet/util/format" - httpprobe "k8s.io/kubernetes/pkg/probe/http" - "k8s.io/kubernetes/pkg/security/apparmor" -) - -const ( - maxRespBodyLength = 10 * 1 << 10 // 10KB - - AppArmorNotAdmittedReason = "AppArmor" -) - -type handlerRunner struct { - httpDoer kubetypes.HTTPDoer - commandRunner kubecontainer.CommandRunner - containerManager podStatusProvider - eventRecorder record.EventRecorder -} - -type podStatusProvider interface { - GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) -} - -// NewHandlerRunner returns a configured lifecycle handler for a container. -func NewHandlerRunner(httpDoer kubetypes.HTTPDoer, commandRunner kubecontainer.CommandRunner, containerManager podStatusProvider, eventRecorder record.EventRecorder) kubecontainer.HandlerRunner { - return &handlerRunner{ - httpDoer: httpDoer, - commandRunner: commandRunner, - containerManager: containerManager, - eventRecorder: eventRecorder, - } -} - -func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) { - switch { - case handler.Exec != nil: - var msg string - // TODO(tallclair): Pass a proper timeout value. 
- output, err := hr.commandRunner.RunInContainer(ctx, containerID, handler.Exec.Command, 0) - if err != nil { - msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) - klog.V(1).ErrorS(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output)) - } - return msg, err - case handler.HTTPGet != nil: - err := hr.runHTTPHandler(ctx, pod, container, handler, hr.eventRecorder) - var msg string - if err != nil { - msg = fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", handler.HTTPGet.Path, container.Name, format.Pod(pod), err) - klog.V(1).ErrorS(err, "HTTP lifecycle hook for Container in Pod failed", "path", handler.HTTPGet.Path, "containerName", container.Name, "pod", klog.KObj(pod)) - } - return msg, err - case handler.Sleep != nil: - err := hr.runSleepHandler(ctx, handler.Sleep.Seconds) - var msg string - if err != nil { - msg = fmt.Sprintf("Sleep lifecycle hook (%d) for Container %q in Pod %q failed - error: %v", handler.Sleep.Seconds, container.Name, format.Pod(pod), err) - klog.V(1).ErrorS(err, "Sleep lifecycle hook for Container in Pod failed", "sleepSeconds", handler.Sleep.Seconds, "containerName", container.Name, "pod", klog.KObj(pod)) - } - return msg, err - default: - err := fmt.Errorf("invalid handler: %v", handler) - msg := fmt.Sprintf("Cannot run handler: %v", err) - klog.ErrorS(err, "Cannot run handler") - return msg, err - } -} - -// resolvePort attempts to turn an IntOrString port reference into a concrete port number. -// If portReference has an int value, it is treated as a literal, and simply returns that value. -// If portReference is a string, an attempt is first made to parse it as an integer. If that fails, -// an attempt is made to find a port with the same name in the container spec. -// If a port with the same name is found, it's ContainerPort value is returned. If no matching -// port is found, an error is returned. 
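// The lookup order described above is easy to mirror in isolation. Below is a
// self-contained sketch (hypothetical names, not the vendored function) of the
// same resolution order: numeric string first, then declared port names.

package main

import (
	"fmt"
	"strconv"
)

type namedPort struct {
	name string
	port int
}

func resolve(ref string, ports []namedPort) (int, error) {
	if p, err := strconv.Atoi(ref); err == nil {
		return p, nil // numeric references are used as-is
	}
	for _, np := range ports {
		if np.name == ref {
			return np.port, nil // fall back to the declared port name
		}
	}
	return -1, fmt.Errorf("couldn't find port: %q", ref)
}

func main() {
	ports := []namedPort{{name: "metrics", port: 9090}}
	fmt.Println(resolve("8080", ports))    // 8080 <nil>
	fmt.Println(resolve("metrics", ports)) // 9090 <nil>
}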
-func resolvePort(portReference intstr.IntOrString, container *v1.Container) (int, error) { - if portReference.Type == intstr.Int { - return portReference.IntValue(), nil - } - portName := portReference.StrVal - port, err := strconv.Atoi(portName) - if err == nil { - return port, nil - } - for _, portSpec := range container.Ports { - if portSpec.Name == portName { - return int(portSpec.ContainerPort), nil - } - } - return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) -} - -func (hr *handlerRunner) runSleepHandler(ctx context.Context, seconds int64) error { - if !utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepAction) { - return nil - } - c := time.After(time.Duration(seconds) * time.Second) - select { - case <-ctx.Done(): - // unexpected termination - metrics.LifecycleHandlerSleepTerminated.Inc() - return fmt.Errorf("container terminated before sleep hook finished") - case <-c: - return nil - } -} - -func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error { - host := handler.HTTPGet.Host - podIP := host - if len(host) == 0 { - status, err := hr.containerManager.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) - if err != nil { - klog.ErrorS(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod)) - return err - } - if len(status.IPs) == 0 { - return fmt.Errorf("failed to find networking container: %v", status) - } - host = status.IPs[0] - podIP = host - } - - req, err := httpprobe.NewRequestForHTTPGetAction(handler.HTTPGet, container, podIP, "lifecycle") - if err != nil { - return err - } - resp, err := hr.httpDoer.Do(req) - discardHTTPRespBody(resp) - - if isHTTPResponseError(err) { - klog.V(1).ErrorS(err, "HTTPS request to lifecycle hook got HTTP response, retrying with HTTP.", "pod", klog.KObj(pod), "host", req.URL.Host) - - req := req.Clone(context.Background()) - req.URL.Scheme = "http" - req.Header.Del("Authorization") - resp, httpErr := hr.httpDoer.Do(req) - - // clear err since the fallback succeeded - if httpErr == nil { - metrics.LifecycleHandlerHTTPFallbacks.Inc() - if eventRecorder != nil { - // report the fallback with an event - eventRecorder.Event(pod, v1.EventTypeWarning, "LifecycleHTTPFallback", fmt.Sprintf("request to HTTPS lifecycle hook %s got HTTP response, retry with HTTP succeeded", req.URL.Host)) - } - err = nil - } - discardHTTPRespBody(resp) - } - return err -} - -func discardHTTPRespBody(resp *http.Response) { - if resp == nil { - return - } - - // Ensure the response body is fully read and closed - // before we reconnect, so that we reuse the same TCP - // connection. - defer resp.Body.Close() - - if resp.ContentLength <= maxRespBodyLength { - io.Copy(io.Discard, &io.LimitedReader{R: resp.Body, N: maxRespBodyLength}) - } -} - -// NewAppArmorAdmitHandler returns a PodAdmitHandler which is used to evaluate -// if a pod can be admitted from the perspective of AppArmor. -func NewAppArmorAdmitHandler(validator apparmor.Validator) PodAdmitHandler { - return &appArmorAdmitHandler{ - Validator: validator, - } -} - -type appArmorAdmitHandler struct { - apparmor.Validator -} - -func (a *appArmorAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult { - // If the pod is already running or terminated, no need to recheck AppArmor. 
- if attrs.Pod.Status.Phase != v1.PodPending { - return PodAdmitResult{Admit: true} - } - - err := a.Validate(attrs.Pod) - if err == nil { - return PodAdmitResult{Admit: true} - } - return PodAdmitResult{ - Admit: false, - Reason: AppArmorNotAdmittedReason, - Message: fmt.Sprintf("Cannot enforce AppArmor: %v", err), - } -} - -func isHTTPResponseError(err error) bool { - if err == nil { - return false - } - urlErr := &url.Error{} - if !errors.As(err, &urlErr) { - return false - } - return strings.Contains(urlErr.Err.Error(), "server gave HTTP response to HTTPS client") -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/interfaces.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/interfaces.go deleted file mode 100644 index 3be7adade54..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/interfaces.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lifecycle - -import "k8s.io/api/core/v1" - -// PodAdmitAttributes is the context for a pod admission decision. -// The member fields of this struct should never be mutated. -type PodAdmitAttributes struct { - // the pod to evaluate for admission - Pod *v1.Pod - // all pods bound to the kubelet excluding the pod being evaluated - OtherPods []*v1.Pod -} - -// PodAdmitResult provides the result of a pod admission decision. -type PodAdmitResult struct { - // if true, the pod should be admitted. - Admit bool - // a brief single-word reason why the pod could not be admitted. - Reason string - // a brief message explaining why the pod could not be admitted. - Message string -} - -// PodAdmitHandler is notified during pod admission. -type PodAdmitHandler interface { - // Admit evaluates if a pod can be admitted. - Admit(attrs *PodAdmitAttributes) PodAdmitResult -} - -// PodAdmitTarget maintains a list of handlers to invoke. -type PodAdmitTarget interface { - // AddPodAdmitHandler adds the specified handler. - AddPodAdmitHandler(a PodAdmitHandler) -} - -// PodSyncLoopHandler is invoked during each sync loop iteration. -type PodSyncLoopHandler interface { - // ShouldSync returns true if the pod needs to be synced. - // This operation must return immediately as its called for each pod. - // The provided pod should never be modified. - ShouldSync(pod *v1.Pod) bool -} - -// PodSyncLoopTarget maintains a list of handlers to pod sync loop. -type PodSyncLoopTarget interface { - // AddPodSyncLoopHandler adds the specified handler. - AddPodSyncLoopHandler(a PodSyncLoopHandler) -} - -// ShouldEvictResponse provides the result of a should evict request. -type ShouldEvictResponse struct { - // if true, the pod should be evicted. - Evict bool - // a brief CamelCase reason why the pod should be evicted. - Reason string - // a brief message why the pod should be evicted. - Message string -} - -// PodSyncHandler is invoked during each sync pod operation. 
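// A toy PodAdmitHandler written against the interfaces above (a hypothetical
// policy, shown only to illustrate the Admit contract):

type denyHostNetworkHandler struct{}

var _ PodAdmitHandler = denyHostNetworkHandler{}

func (denyHostNetworkHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
	if attrs.Pod.Spec.HostNetwork {
		return PodAdmitResult{
			Admit:   false,
			Reason:  "HostNetworkNotAllowed",
			Message: "this kubelet does not admit host-network pods",
		}
	}
	return PodAdmitResult{Admit: true}
}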
-type PodSyncHandler interface { - // ShouldEvict is invoked during each sync pod operation to determine - // if the pod should be evicted from the kubelet. If so, the pod status - // is updated to mark its phase as failed with the provided reason and message, - // and the pod is immediately killed. - // This operation must return immediately as its called for each sync pod. - // The provided pod should never be modified. - ShouldEvict(pod *v1.Pod) ShouldEvictResponse -} - -// PodSyncTarget maintains a list of handlers to pod sync. -type PodSyncTarget interface { - // AddPodSyncHandler adds the specified handler - AddPodSyncHandler(a PodSyncHandler) -} - -// PodLifecycleTarget groups a set of lifecycle interfaces for convenience. -type PodLifecycleTarget interface { - PodAdmitTarget - PodSyncLoopTarget - PodSyncTarget -} - -// PodAdmitHandlers maintains a list of handlers to pod admission. -type PodAdmitHandlers []PodAdmitHandler - -// AddPodAdmitHandler adds the specified observer. -func (handlers *PodAdmitHandlers) AddPodAdmitHandler(a PodAdmitHandler) { - *handlers = append(*handlers, a) -} - -// PodSyncLoopHandlers maintains a list of handlers to pod sync loop. -type PodSyncLoopHandlers []PodSyncLoopHandler - -// AddPodSyncLoopHandler adds the specified observer. -func (handlers *PodSyncLoopHandlers) AddPodSyncLoopHandler(a PodSyncLoopHandler) { - *handlers = append(*handlers, a) -} - -// PodSyncHandlers maintains a list of handlers to pod sync. -type PodSyncHandlers []PodSyncHandler - -// AddPodSyncHandler adds the specified handler. -func (handlers *PodSyncHandlers) AddPodSyncHandler(a PodSyncHandler) { - *handlers = append(*handlers, a) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go deleted file mode 100644 index a505faca694..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go +++ /dev/null @@ -1,412 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lifecycle - -import ( - "fmt" - "runtime" - - v1 "k8s.io/api/core/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/component-base/featuregate" - "k8s.io/component-helpers/scheduling/corev1" - "k8s.io/klog/v2" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/types" - "k8s.io/kubernetes/pkg/scheduler" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration" - "k8s.io/utils/ptr" -) - -const ( - // PodOSSelectorNodeLabelDoesNotMatch is used to denote that the pod was - // rejected admission to the node because the pod's node selector - // corresponding to kubernetes.io/os label didn't match the node label. 
-	PodOSSelectorNodeLabelDoesNotMatch = "PodOSSelectorNodeLabelDoesNotMatch"
-
-	// PodOSNotSupported is used to denote that the pod was rejected admission
-	// to the node because the pod's OS field didn't match the node OS.
-	PodOSNotSupported = "PodOSNotSupported"
-
-	// InvalidNodeInfo is used to denote that the pod was rejected admission
-	// to the node because the kubelet was unable to retrieve the node info.
-	InvalidNodeInfo = "InvalidNodeInfo"
-
-	// InitContainerRestartPolicyForbidden is used to denote that the pod was
-	// rejected admission to the node because it uses a restart policy other
-	// than Always for some of its init containers.
-	InitContainerRestartPolicyForbidden = "InitContainerRestartPolicyForbidden"
-
-	// SupplementalGroupsPolicyNotSupported is used to denote that the pod was
-	// rejected admission to the node because the node does not support
-	// the pod's SupplementalGroupsPolicy.
-	SupplementalGroupsPolicyNotSupported = "SupplementalGroupsPolicyNotSupported"
-
-	// UnexpectedAdmissionError is used to denote that the pod was rejected
-	// admission to the node because of an error during admission that could not
-	// be categorized.
-	UnexpectedAdmissionError = "UnexpectedAdmissionError"
-
-	// UnknownReason is used to denote that the pod was rejected admission to
-	// the node because a predicate failed for a reason that could not be
-	// determined.
-	UnknownReason = "UnknownReason"
-
-	// UnexpectedPredicateFailureType is used to denote that the pod was
-	// rejected admission to the node because a predicate returned a reason
-	// object that was not an InsufficientResourceError or a PredicateFailureError.
-	UnexpectedPredicateFailureType = "UnexpectedPredicateFailureType"
-
-	// Prefix for admission reason when kubelet rejects a pod due to insufficient
-	// resources available.
-	InsufficientResourcePrefix = "OutOf"
-
-	// These reasons are used to denote that the pod was rejected admission
-	// to the node because there are not enough resources to run the pod.
-	OutOfCPU              = "OutOfcpu"
-	OutOfMemory           = "OutOfmemory"
-	OutOfEphemeralStorage = "OutOfephemeral-storage"
-	OutOfPods             = "OutOfpods"
-)
-
-type getNodeAnyWayFuncType func() (*v1.Node, error)
-
-type pluginResourceUpdateFuncType func(*schedulerframework.NodeInfo, *PodAdmitAttributes) error
-
-// AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
-// This allows for the graceful handling of pod admission failure.
-type AdmissionFailureHandler interface {
-	HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []PredicateFailureReason) ([]PredicateFailureReason, error)
-}
-
-type predicateAdmitHandler struct {
-	getNodeAnyWayFunc        getNodeAnyWayFuncType
-	pluginResourceUpdateFunc pluginResourceUpdateFuncType
-	admissionFailureHandler  AdmissionFailureHandler
-}
-
-var _ PodAdmitHandler = &predicateAdmitHandler{}
-
-// NewPredicateAdmitHandler returns a PodAdmitHandler which is used to evaluate
-// if a pod can be admitted from the perspective of predicates.
-func NewPredicateAdmitHandler(getNodeAnyWayFunc getNodeAnyWayFuncType, admissionFailureHandler AdmissionFailureHandler, pluginResourceUpdateFunc pluginResourceUpdateFuncType) PodAdmitHandler { - return &predicateAdmitHandler{ - getNodeAnyWayFunc, - pluginResourceUpdateFunc, - admissionFailureHandler, - } -} - -func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult { - node, err := w.getNodeAnyWayFunc() - if err != nil { - klog.ErrorS(err, "Cannot get Node info") - return PodAdmitResult{ - Admit: false, - Reason: InvalidNodeInfo, - Message: "Kubelet cannot get node info.", - } - } - admitPod := attrs.Pod - - // perform the checks that preemption will not help first to avoid meaningless pod eviction - if rejectPodAdmissionBasedOnOSSelector(admitPod, node) { - return PodAdmitResult{ - Admit: false, - Reason: PodOSSelectorNodeLabelDoesNotMatch, - Message: "Failed to admit pod as the `kubernetes.io/os` label doesn't match node label", - } - } - if rejectPodAdmissionBasedOnOSField(admitPod) { - return PodAdmitResult{ - Admit: false, - Reason: PodOSNotSupported, - Message: "Failed to admit pod as the OS field doesn't match node OS", - } - } - - if rejectPodAdmissionBasedOnSupplementalGroupsPolicy(admitPod, node) { - message := fmt.Sprintf("SupplementalGroupsPolicy=%s is not supported in this node", v1.SupplementalGroupsPolicyStrict) - klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message) - return PodAdmitResult{ - Admit: false, - Reason: SupplementalGroupsPolicyNotSupported, - Message: message, - } - } - - pods := attrs.OtherPods - nodeInfo := schedulerframework.NewNodeInfo(pods...) - nodeInfo.SetNode(node) - - // ensure the node has enough plugin resources for that required in pods - if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil { - message := fmt.Sprintf("Update plugin resources failed due to %v, which is unexpected.", err) - klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message) - return PodAdmitResult{ - Admit: false, - Reason: UnexpectedAdmissionError, - Message: message, - } - } - - // Remove the requests of the extended resources that are missing in the - // node info. This is required to support cluster-level resources, which - // are extended resources unknown to nodes. - // - // Caveat: If a pod was manually bound to a node (e.g., static pod) where a - // node-level extended resource it requires is not found, then kubelet will - // not fail admission while it should. This issue will be addressed with - // the Resource Class API in the future. 
- podWithoutMissingExtendedResources := removeMissingExtendedResources(admitPod, nodeInfo) - - reasons := generalFilter(podWithoutMissingExtendedResources, nodeInfo) - fit := len(reasons) == 0 - if !fit { - reasons, err = w.admissionFailureHandler.HandleAdmissionFailure(admitPod, reasons) - fit = len(reasons) == 0 && err == nil - if err != nil { - message := fmt.Sprintf("Unexpected error while attempting to recover from admission failure: %v", err) - klog.InfoS("Failed to admit pod, unexpected error while attempting to recover from admission failure", "pod", klog.KObj(admitPod), "err", err) - return PodAdmitResult{ - Admit: fit, - Reason: UnexpectedAdmissionError, - Message: message, - } - } - } - if !fit { - var reason string - var message string - if len(reasons) == 0 { - message = fmt.Sprint("GeneralPredicates failed due to unknown reason, which is unexpected.") - klog.InfoS("Failed to admit pod: GeneralPredicates failed due to unknown reason, which is unexpected", "pod", klog.KObj(admitPod)) - return PodAdmitResult{ - Admit: fit, - Reason: UnknownReason, - Message: message, - } - } - // If there are failed predicates, we only return the first one as a reason. - r := reasons[0] - switch re := r.(type) { - case *PredicateFailureError: - reason = re.PredicateName - message = re.Error() - klog.V(2).InfoS("Predicate failed on Pod", "pod", klog.KObj(admitPod), "err", message) - case *InsufficientResourceError: - switch re.ResourceName { - case v1.ResourceCPU: - reason = OutOfCPU - case v1.ResourceMemory: - reason = OutOfMemory - case v1.ResourceEphemeralStorage: - reason = OutOfEphemeralStorage - case v1.ResourcePods: - reason = OutOfPods - default: - reason = fmt.Sprintf("%s%s", InsufficientResourcePrefix, re.ResourceName) - } - message = re.Error() - klog.V(2).InfoS("Predicate failed on Pod", "pod", klog.KObj(admitPod), "err", message) - default: - reason = UnexpectedPredicateFailureType - message = fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", r) - klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "err", message) - } - return PodAdmitResult{ - Admit: fit, - Reason: reason, - Message: message, - } - } - return PodAdmitResult{ - Admit: true, - } -} - -// rejectPodAdmissionBasedOnOSSelector rejects pod if it's nodeSelector doesn't match -// We expect the kubelet status reconcile which happens every 10sec to update the node labels if there is a mismatch. -func rejectPodAdmissionBasedOnOSSelector(pod *v1.Pod, node *v1.Node) bool { - labels := node.Labels - osName, osLabelExists := labels[v1.LabelOSStable] - if !osLabelExists || osName != runtime.GOOS { - if len(labels) == 0 { - labels = make(map[string]string) - } - labels[v1.LabelOSStable] = runtime.GOOS - } - podLabelSelector, podOSLabelExists := pod.Labels[v1.LabelOSStable] - if !podOSLabelExists { - // If the labelselector didn't exist, let's keep the current behavior as is - return false - } else if podOSLabelExists && podLabelSelector != labels[v1.LabelOSStable] { - return true - } - return false -} - -// rejectPodAdmissionBasedOnOSField rejects pods if their OS field doesn't match runtime.GOOS. -// TODO: Relax this restriction when we start supporting LCOW in kubernetes where podOS may not match -// node's OS. 
-func rejectPodAdmissionBasedOnOSField(pod *v1.Pod) bool { - if pod.Spec.OS == nil { - return false - } - // If the pod OS doesn't match runtime.GOOS return false - return string(pod.Spec.OS.Name) != runtime.GOOS -} - -// rejectPodAdmissionBasedOnSupplementalGroupsPolicy rejects pod only if -// - the feature is beta or above, and SupplementalPolicy=Strict is set in the pod -// - but, the node does not support the feature -// -// Note: During the feature is alpha or before(not yet released) in emulated version, -// it should admit for backward compatibility -func rejectPodAdmissionBasedOnSupplementalGroupsPolicy(pod *v1.Pod, node *v1.Node) bool { - admit, reject := false, true // just for readability - - inUse := (pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SupplementalGroupsPolicy != nil) - if !inUse { - return admit - } - - isBetaOrAbove := false - if featureSpec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[features.SupplementalGroupsPolicy]; ok { - isBetaOrAbove = (featureSpec.PreRelease == featuregate.Beta) || (featureSpec.PreRelease == featuregate.GA) - } - - if !isBetaOrAbove { - return admit - } - - featureSupportedOnNode := ptr.Deref( - ptr.Deref(node.Status.Features, v1.NodeFeatures{SupplementalGroupsPolicy: ptr.To(false)}).SupplementalGroupsPolicy, - false, - ) - effectivePolicy := ptr.Deref( - pod.Spec.SecurityContext.SupplementalGroupsPolicy, - v1.SupplementalGroupsPolicyMerge, - ) - - if effectivePolicy == v1.SupplementalGroupsPolicyStrict && !featureSupportedOnNode { - return reject - } - - return admit -} - -func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) *v1.Pod { - filterExtendedResources := func(containers []v1.Container) { - for i, c := range containers { - // We only handle requests in Requests but not Limits because the - // PodFitsResources predicate, to which the result pod will be passed, - // does not use Limits. - filteredResources := make(v1.ResourceList) - for rName, rQuant := range c.Resources.Requests { - if v1helper.IsExtendedResourceName(rName) { - if _, found := nodeInfo.Allocatable.ScalarResources[rName]; !found { - continue - } - } - filteredResources[rName] = rQuant - } - containers[i].Resources.Requests = filteredResources - } - } - podCopy := pod.DeepCopy() - filterExtendedResources(podCopy.Spec.Containers) - filterExtendedResources(podCopy.Spec.InitContainers) - return podCopy -} - -// InsufficientResourceError is an error type that indicates what kind of resource limit is -// hit and caused the unfitting failure. -type InsufficientResourceError struct { - ResourceName v1.ResourceName - Requested int64 - Used int64 - Capacity int64 -} - -func (e *InsufficientResourceError) Error() string { - return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d", - e.ResourceName, e.Requested, e.Used, e.Capacity) -} - -// PredicateFailureReason interface represents the failure reason of a predicate. -type PredicateFailureReason interface { - GetReason() string -} - -// GetReason returns the reason of the InsufficientResourceError. -func (e *InsufficientResourceError) GetReason() string { - return fmt.Sprintf("Insufficient %v", e.ResourceName) -} - -// GetInsufficientAmount returns the amount of the insufficient resource of the error. -func (e *InsufficientResourceError) GetInsufficientAmount() int64 { - return e.Requested - (e.Capacity - e.Used) -} - -// PredicateFailureError describes a failure error of predicate. 
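// A worked example of GetInsufficientAmount using the type above: a node with
// 8Gi of memory capacity, 7Gi already used, and a pod requesting 2Gi
// (values in MiB for brevity):

e := &InsufficientResourceError{
	ResourceName: v1.ResourceMemory,
	Requested:    2048,
	Used:         7168,
	Capacity:     8192,
}
// free = Capacity - Used = 1024; shortfall = Requested - free = 1024
fmt.Println(e.GetInsufficientAmount()) // 1024
fmt.Println(e.GetReason())             // Insufficient memory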
-type PredicateFailureError struct { - PredicateName string - PredicateDesc string -} - -func (e *PredicateFailureError) Error() string { - return fmt.Sprintf("Predicate %s failed: %s", e.PredicateName, e.PredicateDesc) -} - -// GetReason returns the reason of the PredicateFailureError. -func (e *PredicateFailureError) GetReason() string { - return e.PredicateDesc -} - -// generalFilter checks a group of filterings that the kubelet cares about. -func generalFilter(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) []PredicateFailureReason { - admissionResults := scheduler.AdmissionCheck(pod, nodeInfo, true) - var reasons []PredicateFailureReason - for _, r := range admissionResults { - if r.InsufficientResource != nil { - reasons = append(reasons, &InsufficientResourceError{ - ResourceName: r.InsufficientResource.ResourceName, - Requested: r.InsufficientResource.Requested, - Used: r.InsufficientResource.Used, - Capacity: r.InsufficientResource.Capacity, - }) - } else { - reasons = append(reasons, &PredicateFailureError{r.Name, r.Reason}) - } - } - - // Check taint/toleration except for static pods - if !types.IsStaticPod(pod) { - _, isUntolerated := corev1.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, func(t *v1.Taint) bool { - // Kubelet is only interested in the NoExecute taint. - return t.Effect == v1.TaintEffectNoExecute - }) - if isUntolerated { - reasons = append(reasons, &PredicateFailureError{tainttoleration.Name, tainttoleration.ErrReasonNotMatch}) - } - } - - return reasons -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/OWNERS deleted file mode 100644 index 4971e756461..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners -approvers: - - dashpole diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go deleted file mode 100644 index 7a04387ab6f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ /dev/null @@ -1,1209 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "sync" - "time" - - "k8s.io/component-base/metrics" - "k8s.io/component-base/metrics/legacyregistry" - - "k8s.io/apimachinery/pkg/types" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/features" -) - -// This const block defines the metric names for the kubelet metrics. 
-const ( - FirstNetworkPodStartSLIDurationKey = "first_network_pod_start_sli_duration_seconds" - KubeletSubsystem = "kubelet" - DRASubsystem = "dra" - NodeNameKey = "node_name" - NodeLabelKey = "node" - NodeStartupPreKubeletKey = "node_startup_pre_kubelet_duration_seconds" - NodeStartupPreRegistrationKey = "node_startup_pre_registration_duration_seconds" - NodeStartupRegistrationKey = "node_startup_registration_duration_seconds" - NodeStartupPostRegistrationKey = "node_startup_post_registration_duration_seconds" - NodeStartupKey = "node_startup_duration_seconds" - PodWorkerDurationKey = "pod_worker_duration_seconds" - PodStartDurationKey = "pod_start_duration_seconds" - PodStartSLIDurationKey = "pod_start_sli_duration_seconds" - PodStartTotalDurationKey = "pod_start_total_duration_seconds" - CgroupManagerOperationsKey = "cgroup_manager_duration_seconds" - PodWorkerStartDurationKey = "pod_worker_start_duration_seconds" - PodStatusSyncDurationKey = "pod_status_sync_duration_seconds" - PLEGRelistDurationKey = "pleg_relist_duration_seconds" - PLEGDiscardEventsKey = "pleg_discard_events" - PLEGRelistIntervalKey = "pleg_relist_interval_seconds" - PLEGLastSeenKey = "pleg_last_seen_seconds" - EventedPLEGConnErrKey = "evented_pleg_connection_error_count" - EventedPLEGConnKey = "evented_pleg_connection_success_count" - EventedPLEGConnLatencyKey = "evented_pleg_connection_latency_seconds" - EvictionsKey = "evictions" - EvictionStatsAgeKey = "eviction_stats_age_seconds" - PreemptionsKey = "preemptions" - VolumeStatsCapacityBytesKey = "volume_stats_capacity_bytes" - VolumeStatsAvailableBytesKey = "volume_stats_available_bytes" - VolumeStatsUsedBytesKey = "volume_stats_used_bytes" - VolumeStatsInodesKey = "volume_stats_inodes" - VolumeStatsInodesFreeKey = "volume_stats_inodes_free" - VolumeStatsInodesUsedKey = "volume_stats_inodes_used" - VolumeStatsHealthStatusAbnormalKey = "volume_stats_health_status_abnormal" - RunningPodsKey = "running_pods" - RunningContainersKey = "running_containers" - DesiredPodCountKey = "desired_pods" - ActivePodCountKey = "active_pods" - MirrorPodCountKey = "mirror_pods" - WorkingPodCountKey = "working_pods" - OrphanedRuntimePodTotalKey = "orphaned_runtime_pods_total" - RestartedPodTotalKey = "restarted_pods_total" - ImagePullDurationKey = "image_pull_duration_seconds" - CgroupVersionKey = "cgroup_version" - - // Metrics keys of remote runtime operations - RuntimeOperationsKey = "runtime_operations_total" - RuntimeOperationsDurationKey = "runtime_operations_duration_seconds" - RuntimeOperationsErrorsKey = "runtime_operations_errors_total" - // Metrics keys of device plugin operations - DevicePluginRegistrationCountKey = "device_plugin_registration_total" - DevicePluginAllocationDurationKey = "device_plugin_alloc_duration_seconds" - // Metrics keys of pod resources operations - PodResourcesEndpointRequestsTotalKey = "pod_resources_endpoint_requests_total" - PodResourcesEndpointRequestsListKey = "pod_resources_endpoint_requests_list" - PodResourcesEndpointRequestsGetAllocatableKey = "pod_resources_endpoint_requests_get_allocatable" - PodResourcesEndpointErrorsListKey = "pod_resources_endpoint_errors_list" - PodResourcesEndpointErrorsGetAllocatableKey = "pod_resources_endpoint_errors_get_allocatable" - PodResourcesEndpointRequestsGetKey = "pod_resources_endpoint_requests_get" - PodResourcesEndpointErrorsGetKey = "pod_resources_endpoint_errors_get" - - // Metrics keys for RuntimeClass - RunPodSandboxDurationKey = "run_podsandbox_duration_seconds" - RunPodSandboxErrorsKey = 
"run_podsandbox_errors_total" - - // Metrics to keep track of total number of Pods and Containers started - StartedPodsTotalKey = "started_pods_total" - StartedPodsErrorsTotalKey = "started_pods_errors_total" - StartedContainersTotalKey = "started_containers_total" - StartedContainersErrorsTotalKey = "started_containers_errors_total" - - // Metrics to track HostProcess container usage by this kubelet - StartedHostProcessContainersTotalKey = "started_host_process_containers_total" - StartedHostProcessContainersErrorsTotalKey = "started_host_process_containers_errors_total" - - // Metrics to track ephemeral container usage by this kubelet - ManagedEphemeralContainersKey = "managed_ephemeral_containers" - - // Metrics to track the CPU manager behavior - CPUManagerPinningRequestsTotalKey = "cpu_manager_pinning_requests_total" - CPUManagerPinningErrorsTotalKey = "cpu_manager_pinning_errors_total" - CPUManagerSharedPoolSizeMilliCoresKey = "cpu_manager_shared_pool_size_millicores" - CPUManagerExclusiveCPUsAllocationCountKey = "cpu_manager_exclusive_cpu_allocation_count" - CPUManagerAllocationPerNUMAKey = "cpu_manager_allocation_per_numa" - - // Metrics to track the Memory manager behavior - MemoryManagerPinningRequestsTotalKey = "memory_manager_pinning_requests_total" - MemoryManagerPinningErrorsTotalKey = "memory_manager_pinning_errors_total" - - // Metrics to track the Topology manager behavior - TopologyManagerAdmissionRequestsTotalKey = "topology_manager_admission_requests_total" - TopologyManagerAdmissionErrorsTotalKey = "topology_manager_admission_errors_total" - TopologyManagerAdmissionDurationKey = "topology_manager_admission_duration_ms" - - // Metrics to track orphan pod cleanup - orphanPodCleanedVolumesKey = "orphan_pod_cleaned_volumes" - orphanPodCleanedVolumesErrorsKey = "orphan_pod_cleaned_volumes_errors" - - // Metric for tracking garbage collected images - ImageGarbageCollectedTotalKey = "image_garbage_collected_total" - - // Metric for tracking aligment of compute resources - ContainerAlignedComputeResourcesNameKey = "container_aligned_compute_resources_count" - ContainerAlignedComputeResourcesFailureNameKey = "container_aligned_compute_resources_failure_count" - ContainerAlignedComputeResourcesScopeLabelKey = "scope" - ContainerAlignedComputeResourcesBoundaryLabelKey = "boundary" - - // Metric keys for DRA operations - DRAOperationsDurationKey = "operations_duration_seconds" - DRAGRPCOperationsDurationKey = "grpc_operations_duration_seconds" - - // Values used in metric labels - Container = "container" - InitContainer = "init_container" - EphemeralContainer = "ephemeral_container" - - AlignScopePod = "pod" - AlignScopeContainer = "container" - - AlignedPhysicalCPU = "physical_cpu" - AlignedNUMANode = "numa_node" - AlignedUncoreCache = "uncore_cache" - - // Metrics to track kubelet admission rejections. 
- AdmissionRejectionsTotalKey = "admission_rejections_total" - - // Image Volume metrics - ImageVolumeRequestedTotalKey = "image_volume_requested_total" - ImageVolumeMountedSucceedTotalKey = "image_volume_mounted_succeed_total" - ImageVolumeMountedErrorsTotalKey = "image_volume_mounted_errors_total" -) - -type imageSizeBucket struct { - lowerBoundInBytes uint64 - label string -} - -var ( - podStartupDurationBuckets = []float64{0.5, 1, 2, 3, 4, 5, 6, 8, 10, 20, 30, 45, 60, 120, 180, 240, 300, 360, 480, 600, 900, 1200, 1800, 2700, 3600} - imagePullDurationBuckets = []float64{1, 5, 10, 20, 30, 60, 120, 180, 240, 300, 360, 480, 600, 900, 1200, 1800, 2700, 3600} - // imageSizeBuckets has the labels to be associated with image_pull_duration_seconds metric. For example, if the size of - // an image pulled is between 1GB and 5GB, the label will be "1GB-5GB". - imageSizeBuckets = []imageSizeBucket{ - {0, "0-10MB"}, - {10 * 1024 * 1024, "10MB-100MB"}, - {100 * 1024 * 1024, "100MB-500MB"}, - {500 * 1024 * 1024, "500MB-1GB"}, - {1 * 1024 * 1024 * 1024, "1GB-5GB"}, - {5 * 1024 * 1024 * 1024, "5GB-10GB"}, - {10 * 1024 * 1024 * 1024, "10GB-20GB"}, - {20 * 1024 * 1024 * 1024, "20GB-30GB"}, - {30 * 1024 * 1024 * 1024, "30GB-40GB"}, - {40 * 1024 * 1024 * 1024, "40GB-60GB"}, - {60 * 1024 * 1024 * 1024, "60GB-100GB"}, - {100 * 1024 * 1024 * 1024, "GT100GB"}, - } - // DRADurationBuckets is the bucket boundaries for DRA operation duration metrics - // DRAOperationsDuration and DRAGRPCOperationsDuration defined below in this file. - // The buckets max value 40 is based on the 45sec max gRPC timeout value defined - // for the DRA gRPC calls in the pkg/kubelet/cm/dra/plugin/registration.go - DRADurationBuckets = metrics.ExponentialBucketsRange(.1, 40, 15) -) - -var ( - // NodeName is a Gauge that tracks the ode's name. The count is always 1. - NodeName = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: KubeletSubsystem, - Name: NodeNameKey, - Help: "The node's name. The count is always 1.", - StabilityLevel: metrics.ALPHA, - }, - []string{NodeLabelKey}, - ) - // ContainersPerPodCount is a Histogram that tracks the number of containers per pod. - ContainersPerPodCount = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: "containers_per_pod_count", - Help: "The number of containers per pod.", - Buckets: metrics.ExponentialBuckets(1, 2, 5), - StabilityLevel: metrics.ALPHA, - }, - ) - // PodWorkerDuration is a Histogram that tracks the duration (in seconds) in takes to sync a single pod. - // Broken down by the operation type. - PodWorkerDuration = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: PodWorkerDurationKey, - Help: "Duration in seconds to sync a single pod. Broken down by operation type: create, update, or sync", - Buckets: metrics.DefBuckets, - StabilityLevel: metrics.ALPHA, - }, - []string{"operation_type"}, - ) - // PodStartDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run since it's - // first time seen by kubelet. - PodStartDuration = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: PodStartDurationKey, - Help: "Duration in seconds from kubelet seeing a pod for the first time to the pod starting to run", - Buckets: podStartupDurationBuckets, - StabilityLevel: metrics.ALPHA, - }, - ) - // PodStartSLIDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run, - // excluding the time for image pulling. 
-	// PodStartSLIDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run,
-	// excluding the time for image pulling. This metric should reflect the "Pod startup latency SLI" definition
-	// ref: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/pod_startup_latency.md
-	//
-	// The histogram bucket boundaries for pod startup latency metrics, measured in seconds. These are hand-picked
-	// so as to be roughly exponential but still round numbers in everyday units. This is to minimise the number
-	// of buckets while allowing accurate measurement of thresholds which might be used in SLOs
-	// e.g. x% of pods start up within 30 seconds, or 15 minutes, etc.
-	PodStartSLIDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           PodStartSLIDurationKey,
-			Help:           "Duration in seconds to start a pod, excluding time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch",
-			Buckets:        podStartupDurationBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{},
-	)
-
-	// PodStartTotalDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run
-	// since creation, including the time for image pulling.
-	//
-	// The histogram bucket boundaries for pod startup latency metrics, measured in seconds. These are hand-picked
-	// so as to be roughly exponential but still round numbers in everyday units. This is to minimise the number
-	// of buckets while allowing accurate measurement of thresholds which might be used in SLOs
-	// e.g. x% of pods start up within 30 seconds, or 15 minutes, etc.
-	PodStartTotalDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           PodStartTotalDurationKey,
-			Help:           "Duration in seconds to start a pod since creation, including time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch",
-			Buckets:        podStartupDurationBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{},
-	)
-
-	// FirstNetworkPodStartSLIDuration is a gauge that tracks the duration (in seconds) it takes for the first network pod to run,
-	// excluding the time for image pulling. This is an internal and temporary metric required because of limitations of the
-	// existing networking subsystem and CRI/CNI implementations that will be solved by https://github.com/containernetworking/cni/issues/859
-	// The metric represents the latency observed by a user to run workloads in a new node.
-	// ref: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/pod_startup_latency.md
-	FirstNetworkPodStartSLIDuration = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           FirstNetworkPodStartSLIDurationKey,
-			Help:           "Duration in seconds to start the first network pod, excluding time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch",
-			StabilityLevel: metrics.INTERNAL,
-		},
-	)
-
-	// CgroupManagerDuration is a Histogram that tracks the duration (in seconds) it takes for cgroup manager operations to complete.
-	// Broken down by method.
-	CgroupManagerDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           CgroupManagerOperationsKey,
-			Help:           "Duration in seconds for cgroup manager operations. Broken down by method.",
Broken down by method.", - Buckets: metrics.DefBuckets, - StabilityLevel: metrics.ALPHA, - }, - []string{"operation_type"}, - ) - // PodWorkerStartDuration is a Histogram that tracks the duration (in seconds) it takes from kubelet seeing a pod to starting a worker. - PodWorkerStartDuration = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: PodWorkerStartDurationKey, - Help: "Duration in seconds from kubelet seeing a pod to starting a worker.", - Buckets: metrics.DefBuckets, - StabilityLevel: metrics.ALPHA, - }, - ) - // PodStatusSyncDuration is a Histogram that tracks the duration (in seconds) in takes from the time a pod - // status is generated to the time it is synced with the apiserver. If multiple status changes are generated - // on a pod before it is written to the API, the latency is from the first update to the last event. - PodStatusSyncDuration = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: PodStatusSyncDurationKey, - Help: "Duration in seconds to sync a pod status update. Measures time from detection of a change to pod status until the API is successfully updated for that pod, even if multiple intevening changes to pod status occur.", - Buckets: []float64{0.010, 0.050, 0.100, 0.500, 1, 5, 10, 20, 30, 45, 60}, - StabilityLevel: metrics.ALPHA, - }, - ) - // PLEGRelistDuration is a Histogram that tracks the duration (in seconds) it takes for relisting pods in the Kubelet's - // Pod Lifecycle Event Generator (PLEG). - PLEGRelistDuration = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: PLEGRelistDurationKey, - Help: "Duration in seconds for relisting pods in PLEG.", - Buckets: metrics.DefBuckets, - StabilityLevel: metrics.ALPHA, - }, - ) - // PLEGDiscardEvents is a Counter that tracks the number of discarding events in the Kubelet's Pod Lifecycle Event Generator (PLEG). - PLEGDiscardEvents = metrics.NewCounter( - &metrics.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: PLEGDiscardEventsKey, - Help: "The number of discard events in PLEG.", - StabilityLevel: metrics.ALPHA, - }, - ) - - // PLEGRelistInterval is a Histogram that tracks the intervals (in seconds) between relisting in the Kubelet's - // Pod Lifecycle Event Generator (PLEG). - PLEGRelistInterval = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: PLEGRelistIntervalKey, - Help: "Interval in seconds between relisting in PLEG.", - Buckets: metrics.DefBuckets, - StabilityLevel: metrics.ALPHA, - }, - ) - // PLEGLastSeen is a Gauge giving the Unix timestamp when the Kubelet's - // Pod Lifecycle Event Generator (PLEG) was last seen active. - PLEGLastSeen = metrics.NewGauge( - &metrics.GaugeOpts{ - Subsystem: KubeletSubsystem, - Name: PLEGLastSeenKey, - Help: "Timestamp in seconds when PLEG was last seen active.", - StabilityLevel: metrics.ALPHA, - }, - ) - - // EventedPLEGConnErr is a Counter that tracks the number of errors encountered during - // the establishment of streaming connection with the CRI runtime. - EventedPLEGConnErr = metrics.NewCounter( - &metrics.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: EventedPLEGConnErrKey, - Help: "The number of errors encountered during the establishment of streaming connection with the CRI runtime.", - StabilityLevel: metrics.ALPHA, - }, - ) - - // EventedPLEGConn is a Counter that tracks the number of times a streaming client - // was obtained to receive CRI Events. 
-	EventedPLEGConn = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           EventedPLEGConnKey,
-			Help:           "The number of times a streaming client was obtained to receive CRI Events.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// EventedPLEGConnLatency is a Histogram that tracks the latency of streaming connection
-	// with the CRI runtime, measured in seconds.
-	EventedPLEGConnLatency = metrics.NewHistogram(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           EventedPLEGConnLatencyKey,
-			Help:           "The latency of streaming connection with the CRI runtime, measured in seconds.",
-			Buckets:        metrics.DefBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// RuntimeOperations is a Counter that tracks the cumulative number of remote runtime operations.
-	// Broken down by operation type.
-	RuntimeOperations = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           RuntimeOperationsKey,
-			Help:           "Cumulative number of runtime operations by operation type.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"operation_type"},
-	)
-	// RuntimeOperationsDuration is a Histogram that tracks the duration (in seconds) for remote runtime operations to complete.
-	// Broken down by operation type.
-	RuntimeOperationsDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           RuntimeOperationsDurationKey,
-			Help:           "Duration in seconds of runtime operations. Broken down by operation type.",
-			Buckets:        metrics.ExponentialBuckets(.005, 2.5, 14),
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"operation_type"},
-	)
-	// RuntimeOperationsErrors is a Counter that tracks the cumulative number of remote runtime operations errors.
-	// Broken down by operation type.
-	RuntimeOperationsErrors = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           RuntimeOperationsErrorsKey,
-			Help:           "Cumulative number of runtime operation errors by operation type.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"operation_type"},
-	)
-	// Evictions is a Counter that tracks the cumulative number of pod evictions initiated by the kubelet.
-	// Broken down by eviction signal.
-	Evictions = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           EvictionsKey,
-			Help:           "Cumulative number of pod evictions by eviction signal",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"eviction_signal"},
-	)
-	// EvictionStatsAge is a Histogram that tracks the time (in seconds) between when stats are collected and when a pod is evicted
-	// based on those stats. Broken down by eviction signal.
-	EvictionStatsAge = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           EvictionStatsAgeKey,
-			Help:           "Time between when stats are collected, and when pod is evicted based on those stats by eviction signal",
-			Buckets:        metrics.DefBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"eviction_signal"},
-	)
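A hedged sketch of how the three runtime-operation metrics are typically used together; the operation name and the wrapped call are illustrative stand-ins, and SinceInSeconds is the helper defined later in this file:

	const op = "create_container" // illustrative operation_type label value
	doCreateContainer := func() error { return nil } // stand-in for the real CRI call
	start := time.Now()
	RuntimeOperations.WithLabelValues(op).Inc()
	if err := doCreateContainer(); err != nil {
		RuntimeOperationsErrors.WithLabelValues(op).Inc()
	}
	RuntimeOperationsDuration.WithLabelValues(op).Observe(SinceInSeconds(start))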
-	// Preemptions is a Counter that tracks the cumulative number of pod preemptions initiated by the kubelet.
-	// Broken down by preemption signal. A preemption is only recorded for one resource, the sum of all signals
-	// is the number of preemptions on the given node.
-	Preemptions = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           PreemptionsKey,
-			Help:           "Cumulative number of pod preemptions by preemption resource",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"preemption_signal"},
-	)
-	// DevicePluginRegistrationCount is a Counter that tracks the cumulative number of device plugin registrations.
-	// Broken down by resource name.
-	DevicePluginRegistrationCount = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           DevicePluginRegistrationCountKey,
-			Help:           "Cumulative number of device plugin registrations. Broken down by resource name.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"resource_name"},
-	)
-	// DevicePluginAllocationDuration is a Histogram that tracks the duration (in seconds) to serve a device plugin allocation request.
-	// Broken down by resource name.
-	DevicePluginAllocationDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           DevicePluginAllocationDurationKey,
-			Help:           "Duration in seconds to serve a device plugin Allocation request. Broken down by resource name.",
-			Buckets:        metrics.DefBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"resource_name"},
-	)
-
-	// PodResourcesEndpointRequestsTotalCount is a Counter that tracks the cumulative number of requests to the PodResource endpoints.
-	// Broken down by server API version.
-	PodResourcesEndpointRequestsTotalCount = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           PodResourcesEndpointRequestsTotalKey,
-			Help:           "Cumulative number of requests to the PodResource endpoint. Broken down by server api version.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"server_api_version"},
-	)
-
-	// PodResourcesEndpointRequestsListCount is a Counter that tracks the number of requests to the PodResource List() endpoint.
-	// Broken down by server API version.
-	PodResourcesEndpointRequestsListCount = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           PodResourcesEndpointRequestsListKey,
-			Help:           "Number of requests to the PodResource List endpoint. Broken down by server api version.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"server_api_version"},
-	)
-
-	// PodResourcesEndpointRequestsGetAllocatableCount is a Counter that tracks the number of requests to the PodResource GetAllocatableResources() endpoint.
-	// Broken down by server API version.
-	PodResourcesEndpointRequestsGetAllocatableCount = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           PodResourcesEndpointRequestsGetAllocatableKey,
-			Help:           "Number of requests to the PodResource GetAllocatableResources endpoint. Broken down by server api version.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"server_api_version"},
-	)
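A minimal sketch of how a PodResources server handler might bump these counters; the handler shape and the "v1" label value are assumptions for illustration:

	// Hypothetical List() handler: count the call under its API version.
	PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
	PodResourcesEndpointRequestsListCount.WithLabelValues("v1").Inc()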
Broken down by server api version.", - StabilityLevel: metrics.ALPHA, - }, - []string{"server_api_version"}, - ) - - // PodResourcesEndpointErrorsGetAllocatableCount is a Counter that tracks the number of errors returned by the PodResource GetAllocatableResources() endpoint. - // Broken down by server API version. - PodResourcesEndpointErrorsGetAllocatableCount = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: PodResourcesEndpointErrorsGetAllocatableKey, - Help: "Number of requests to the PodResource GetAllocatableResources endpoint which returned error. Broken down by server api version.", - StabilityLevel: metrics.ALPHA, - }, - []string{"server_api_version"}, - ) - - // PodResourcesEndpointRequestsGetCount is a Counter that tracks the number of requests to the PodResource Get() endpoint. - // Broken down by server API version. - PodResourcesEndpointRequestsGetCount = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: PodResourcesEndpointRequestsGetKey, - Help: "Number of requests to the PodResource Get endpoint. Broken down by server api version.", - StabilityLevel: metrics.ALPHA, - }, - []string{"server_api_version"}, - ) - - // PodResourcesEndpointErrorsGetCount is a Counter that tracks the number of errors returned by he PodResource List() endpoint. - // Broken down by server API version. - PodResourcesEndpointErrorsGetCount = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: PodResourcesEndpointErrorsGetKey, - Help: "Number of requests to the PodResource Get endpoint which returned error. Broken down by server api version.", - StabilityLevel: metrics.ALPHA, - }, - []string{"server_api_version"}, - ) - - // RunPodSandboxDuration is a Histogram that tracks the duration (in seconds) it takes to run Pod Sandbox operations. - // Broken down by RuntimeClass.Handler. - RunPodSandboxDuration = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: KubeletSubsystem, - Name: RunPodSandboxDurationKey, - Help: "Duration in seconds of the run_podsandbox operations. Broken down by RuntimeClass.Handler.", - // Use DefBuckets for now, will customize the buckets if necessary. - Buckets: metrics.DefBuckets, - StabilityLevel: metrics.ALPHA, - }, - []string{"runtime_handler"}, - ) - // RunPodSandboxErrors is a Counter that tracks the cumulative number of Pod Sandbox operations errors. - // Broken down by RuntimeClass.Handler. - RunPodSandboxErrors = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: RunPodSandboxErrorsKey, - Help: "Cumulative number of the run_podsandbox operation errors by RuntimeClass.Handler.", - StabilityLevel: metrics.ALPHA, - }, - []string{"runtime_handler"}, - ) - - // RunningPodCount is a gauge that tracks the number of Pods currently with a running sandbox - // It is used to expose the kubelet internal state: how many pods have running containers in the container runtime, and mainly for debugging purpose. 
-	RunningPodCount = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           RunningPodsKey,
-			Help:           "Number of pods that have a running pod sandbox",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-	// RunningContainerCount is a gauge that tracks the number of containers currently running
-	RunningContainerCount = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           RunningContainersKey,
-			Help:           "Number of containers currently running",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"container_state"},
-	)
-	// DesiredPodCount tracks the count of pods the Kubelet thinks it should be running
-	DesiredPodCount = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           DesiredPodCountKey,
-			Help:           "The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"static"},
-	)
-	// ActivePodCount tracks the count of pods the Kubelet considers as active when deciding to admit a new pod
-	ActivePodCount = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ActivePodCountKey,
-			Help:           "The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"static"},
-	)
-	// MirrorPodCount tracks the number of mirror pods the Kubelet should have created for static pods
-	MirrorPodCount = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           MirrorPodCountKey,
-			Help:           "The number of mirror pods the kubelet will try to create (one per admitted static pod)",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-	// WorkingPodCount tracks the count of pods in each lifecycle phase, whether they are static pods, and whether they are desired, orphaned, or runtime_only
-	WorkingPodCount = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           WorkingPodCountKey,
-			Help:           "Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"lifecycle", "config", "static"},
-	)
-	// OrphanedRuntimePodTotal is incremented every time a pod is detected in the runtime without being known to the pod worker first
-	OrphanedRuntimePodTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           OrphanedRuntimePodTotalKey,
-			Help:           "Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-	// RestartedPodTotal is incremented every time a pod with the same UID is deleted and recreated
-	RestartedPodTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           RestartedPodTotalKey,
-			Help:           "Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"static"},
-	)
-	// StartedPodsTotal is a counter that tracks pod sandbox creation operations
-	StartedPodsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           StartedPodsTotalKey,
-			Help:           "Cumulative number of pods started",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-	// StartedPodsErrorsTotal is a counter that tracks the number of errors creating pod sandboxes
-	StartedPodsErrorsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           StartedPodsErrorsTotalKey,
-			Help:           "Cumulative number of errors when starting pods",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-	// StartedContainersTotal is a counter that tracks the number of container creation operations
-	StartedContainersTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           StartedContainersTotalKey,
-			Help:           "Cumulative number of containers started",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"container_type"},
-	)
-	// StartedContainersErrorsTotal is a counter that tracks the number of errors creating containers
-	StartedContainersErrorsTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           StartedContainersErrorsTotalKey,
-			Help:           "Cumulative number of errors when starting containers",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"container_type", "code"},
-	)
-	// StartedHostProcessContainersTotal is a counter that tracks the number of hostprocess container creation operations
-	StartedHostProcessContainersTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           StartedHostProcessContainersTotalKey,
-			Help:           "Cumulative number of hostprocess containers started. This metric will only be collected on Windows.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"container_type"},
-	)
-	// StartedHostProcessContainersErrorsTotal is a counter that tracks the number of errors creating hostprocess containers
-	StartedHostProcessContainersErrorsTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           StartedHostProcessContainersErrorsTotalKey,
-			Help:           "Cumulative number of errors when starting hostprocess containers. This metric will only be collected on Windows.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"container_type", "code"},
-	)
-	// ManagedEphemeralContainers is a gauge that indicates how many ephemeral containers are managed by this kubelet.
-	ManagedEphemeralContainers = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ManagedEphemeralContainersKey,
-			Help:           "Current number of ephemeral containers in pods managed by this kubelet.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
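A hedged sketch of the start-path bookkeeping these counters imply; the wrapped call and the "ErrImagePull" code label value are illustrative, while Container is the label value declared in the const block above:

	startContainer := func() error { return nil } // stand-in for the real start path
	StartedContainersTotal.WithLabelValues(Container).Inc()
	if err := startContainer(); err != nil {
		StartedContainersErrorsTotal.WithLabelValues(Container, "ErrImagePull").Inc()
	}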
-
-	// GracefulShutdownStartTime is a gauge that records the time at which the kubelet started graceful shutdown.
-	GracefulShutdownStartTime = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           "graceful_shutdown_start_time_seconds",
-			Help:           "Last graceful shutdown start time since unix epoch in seconds",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// GracefulShutdownEndTime is a gauge that records the time at which the kubelet completed graceful shutdown.
-	GracefulShutdownEndTime = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           "graceful_shutdown_end_time_seconds",
-			Help:           "Last graceful shutdown end time since unix epoch in seconds",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	LifecycleHandlerHTTPFallbacks = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           "lifecycle_handler_http_fallbacks_total",
-			Help:           "The number of times lifecycle handlers successfully fell back to http from https.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// CPUManagerPinningRequestsTotal tracks the number of times the pod spec will cause the cpu manager to pin cores
-	CPUManagerPinningRequestsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           CPUManagerPinningRequestsTotalKey,
-			Help:           "The number of cpu core allocations which required pinning.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// CPUManagerPinningErrorsTotal tracks the number of times the pod spec required the cpu manager to pin cores, but the allocation failed
-	CPUManagerPinningErrorsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           CPUManagerPinningErrorsTotalKey,
-			Help:           "The number of cpu core allocations which required pinning but failed.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// CPUManagerSharedPoolSizeMilliCores reports the current size of the shared CPU pool for non-guaranteed pods
-	CPUManagerSharedPoolSizeMilliCores = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           CPUManagerSharedPoolSizeMilliCoresKey,
-			Help:           "The size of the shared CPU pool for non-guaranteed QoS pods, in millicores.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// CPUManagerExclusiveCPUsAllocationCount reports the total number of CPUs exclusively allocated to containers running on this node
-	CPUManagerExclusiveCPUsAllocationCount = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           CPUManagerExclusiveCPUsAllocationCountKey,
-			Help:           "The total number of CPUs exclusively allocated to containers running on this node",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// CPUManagerAllocationPerNUMA tracks the count of CPUs allocated per NUMA node
-	CPUManagerAllocationPerNUMA = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           CPUManagerAllocationPerNUMAKey,
-			Help:           "Number of CPUs allocated per NUMA node",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{AlignedNUMANode},
-	)
-
-	// ContainerAlignedComputeResources reports the count of resources allocation which granted aligned resources, per alignment boundary
-	ContainerAlignedComputeResources = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ContainerAlignedComputeResourcesNameKey,
-			Help:           "Cumulative number of aligned compute resources allocated to containers by alignment type.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{ContainerAlignedComputeResourcesScopeLabelKey, ContainerAlignedComputeResourcesBoundaryLabelKey},
-	)
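A minimal sketch of the pinning counters in use; allocateExclusiveCPUs is a hypothetical stand-in for the CPU manager's allocation path:

	allocateExclusiveCPUs := func() error { return nil } // stand-in
	CPUManagerPinningRequestsTotal.Inc()
	if err := allocateExclusiveCPUs(); err != nil {
		CPUManagerPinningErrorsTotal.Inc()
	}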
-
-	// ContainerAlignedComputeResourcesFailure reports the count of resources allocation attempts which failed to align resources, per alignment boundary
-	ContainerAlignedComputeResourcesFailure = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ContainerAlignedComputeResourcesFailureNameKey,
-			Help:           "Cumulative number of failures to allocate aligned compute resources to containers by alignment type.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{ContainerAlignedComputeResourcesScopeLabelKey, ContainerAlignedComputeResourcesBoundaryLabelKey},
-	)
-
-	MemoryManagerPinningRequestTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           MemoryManagerPinningRequestsTotalKey,
-			Help:           "The number of memory pages allocations which required pinning.",
-			StabilityLevel: metrics.ALPHA,
-		})
-
-	// MemoryManagerPinningErrorsTotal tracks the number of times the pod spec required the memory manager to pin memory pages, but the allocation failed
-	MemoryManagerPinningErrorsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           MemoryManagerPinningErrorsTotalKey,
-			Help:           "The number of memory pages allocations which required pinning that failed.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// TopologyManagerAdmissionRequestsTotal tracks the number of times the pod spec will cause the topology manager to admit a pod
-	TopologyManagerAdmissionRequestsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           TopologyManagerAdmissionRequestsTotalKey,
-			Help:           "The number of admission requests where resources have to be aligned.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// TopologyManagerAdmissionErrorsTotal tracks the number of times the pod spec required the topology manager to admit a pod, but the admission failed
-	TopologyManagerAdmissionErrorsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           TopologyManagerAdmissionErrorsTotalKey,
-			Help:           "The number of admission request failures where resources could not be aligned.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// TopologyManagerAdmissionDuration is a Histogram that tracks the duration (in milliseconds) to serve a pod admission request.
-	TopologyManagerAdmissionDuration = metrics.NewHistogram(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           TopologyManagerAdmissionDurationKey,
-			Help:           "Duration in milliseconds to serve a pod admission request.",
-			Buckets:        metrics.ExponentialBuckets(.05, 2, 15),
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// OrphanPodCleanedVolumes is the number of orphaned Pods whose volumes were cleaned by removeOrphanedPodVolumeDirs during the last sweep.
-	OrphanPodCleanedVolumes = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           orphanPodCleanedVolumesKey,
-			Help:           "The total number of orphaned Pods whose volumes were cleaned in the last periodic sweep.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
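Since the orphan-cleanup metrics are gauges, a sweep overwrites them rather than accumulating; a sketch with hypothetical counts:

	// Hypothetical end of one periodic cleanup sweep.
	OrphanPodCleanedVolumes.Set(float64(cleanedPods))
	OrphanPodCleanedVolumesErrors.Set(float64(failedPods))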
-	// OrphanPodCleanedVolumesErrors is the number of times that removeOrphanedPodVolumeDirs failed.
-	OrphanPodCleanedVolumesErrors = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           orphanPodCleanedVolumesErrorsKey,
-			Help:           "The number of orphaned Pods whose volumes failed to be cleaned in the last periodic sweep.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	NodeStartupPreKubeletDuration = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           NodeStartupPreKubeletKey,
-			Help:           "Duration in seconds of node startup before kubelet starts.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	NodeStartupPreRegistrationDuration = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           NodeStartupPreRegistrationKey,
-			Help:           "Duration in seconds of node startup before registration.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	NodeStartupRegistrationDuration = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           NodeStartupRegistrationKey,
-			Help:           "Duration in seconds of node startup during registration.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	NodeStartupPostRegistrationDuration = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           NodeStartupPostRegistrationKey,
-			Help:           "Duration in seconds of node startup after registration.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	NodeStartupDuration = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           NodeStartupKey,
-			Help:           "Duration in seconds of node startup in total.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	ImageGarbageCollectedTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ImageGarbageCollectedTotalKey,
-			Help:           "Total number of images garbage collected by the kubelet, whether through disk usage or image age.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"reason"},
-	)
-
-	// ImagePullDuration is a Histogram that tracks the duration (in seconds) it takes for an image to be pulled,
-	// including the time spent in the waiting queue of image puller.
-	// The metric is broken down by bucketed image size.
-	ImagePullDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ImagePullDurationKey,
-			Help:           "Duration in seconds to pull an image.",
-			Buckets:        imagePullDurationBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"image_size_in_bytes"},
-	)
-
-	LifecycleHandlerSleepTerminated = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           "sleep_action_terminated_early_total",
-			Help:           "The number of times the lifecycle sleep handler was terminated before it finished",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	CgroupVersion = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           CgroupVersionKey,
-			Help:           "cgroup version on the host.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// DRAOperationsDuration tracks the duration of the DRA PrepareResources and UnprepareResources requests.
-	DRAOperationsDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      DRASubsystem,
-			Name:           DRAOperationsDurationKey,
-			Help:           "Latency histogram in seconds for the duration of handling all ResourceClaims referenced by a pod when the pod starts or stops. Identified by the name of the operation (PrepareResources or UnprepareResources) and separated by the success of the operation. The number of failed operations is provided through the histogram's overall count.",
-			Buckets:        DRADurationBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"operation_name", "is_error"},
-	)
-
-	// DRAGRPCOperationsDuration tracks the duration of the DRA GRPC operations.
-	DRAGRPCOperationsDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      DRASubsystem,
-			Name:           DRAGRPCOperationsDurationKey,
-			Help:           "Duration in seconds of the DRA gRPC operations",
-			Buckets:        DRADurationBuckets,
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"driver_name", "method_name", "grpc_status_code"},
-	)
-
-	// AdmissionRejectionsTotal tracks the number of admission rejections; currently it is recorded only for pod additions
-	AdmissionRejectionsTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           AdmissionRejectionsTotalKey,
-			Help:           "Cumulative number of pod admission rejections by the Kubelet.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"reason"},
-	)
-
-	// ImageVolumeRequestedTotal tracks the number of requested image volumes.
-	ImageVolumeRequestedTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ImageVolumeRequestedTotalKey,
-			Help:           "Number of requested image volumes.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// ImageVolumeMountedSucceedTotal tracks the number of successful image volume mounts.
-	ImageVolumeMountedSucceedTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ImageVolumeMountedSucceedTotalKey,
-			Help:           "Number of successful image volume mounts.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-
-	// ImageVolumeMountedErrorsTotal tracks the number of failed image volume mounts.
-	ImageVolumeMountedErrorsTotal = metrics.NewCounter(
-		&metrics.CounterOpts{
-			Subsystem:      KubeletSubsystem,
-			Name:           ImageVolumeMountedErrorsTotalKey,
-			Help:           "Number of failed image volume mounts.",
-			StabilityLevel: metrics.ALPHA,
-		},
-	)
-)
-
-var registerMetrics sync.Once
-
-// Register registers all metrics.
-func Register(collectors ...metrics.StableCollector) {
-	// Register the metrics.
-	registerMetrics.Do(func() {
-		legacyregistry.MustRegister(FirstNetworkPodStartSLIDuration)
-		legacyregistry.MustRegister(NodeName)
-		legacyregistry.MustRegister(PodWorkerDuration)
-		legacyregistry.MustRegister(PodStartDuration)
-		legacyregistry.MustRegister(PodStartSLIDuration)
-		legacyregistry.MustRegister(PodStartTotalDuration)
-		legacyregistry.MustRegister(ImagePullDuration)
-		legacyregistry.MustRegister(ImageGarbageCollectedTotal)
-		legacyregistry.MustRegister(NodeStartupPreKubeletDuration)
-		legacyregistry.MustRegister(NodeStartupPreRegistrationDuration)
-		legacyregistry.MustRegister(NodeStartupRegistrationDuration)
-		legacyregistry.MustRegister(NodeStartupPostRegistrationDuration)
-		legacyregistry.MustRegister(NodeStartupDuration)
-		legacyregistry.MustRegister(CgroupManagerDuration)
-		legacyregistry.MustRegister(PodWorkerStartDuration)
-		legacyregistry.MustRegister(PodStatusSyncDuration)
-		legacyregistry.MustRegister(ContainersPerPodCount)
-		legacyregistry.MustRegister(PLEGRelistDuration)
-		legacyregistry.MustRegister(PLEGDiscardEvents)
-		legacyregistry.MustRegister(PLEGRelistInterval)
-		legacyregistry.MustRegister(PLEGLastSeen)
-		legacyregistry.MustRegister(EventedPLEGConnErr)
-		legacyregistry.MustRegister(EventedPLEGConn)
-		legacyregistry.MustRegister(EventedPLEGConnLatency)
-		legacyregistry.MustRegister(RuntimeOperations)
-		legacyregistry.MustRegister(RuntimeOperationsDuration)
-		legacyregistry.MustRegister(RuntimeOperationsErrors)
-		legacyregistry.MustRegister(Evictions)
-		legacyregistry.MustRegister(EvictionStatsAge)
-		legacyregistry.MustRegister(Preemptions)
-		legacyregistry.MustRegister(DevicePluginRegistrationCount)
-		legacyregistry.MustRegister(DevicePluginAllocationDuration)
-		legacyregistry.MustRegister(RunningContainerCount)
-		legacyregistry.MustRegister(RunningPodCount)
-		legacyregistry.MustRegister(DesiredPodCount)
-		legacyregistry.MustRegister(ActivePodCount)
-		legacyregistry.MustRegister(MirrorPodCount)
-		legacyregistry.MustRegister(WorkingPodCount)
-		legacyregistry.MustRegister(OrphanedRuntimePodTotal)
-		legacyregistry.MustRegister(RestartedPodTotal)
-		legacyregistry.MustRegister(ManagedEphemeralContainers)
-		legacyregistry.MustRegister(PodResourcesEndpointRequestsTotalCount)
-		legacyregistry.MustRegister(PodResourcesEndpointRequestsListCount)
-		legacyregistry.MustRegister(PodResourcesEndpointRequestsGetAllocatableCount)
-		legacyregistry.MustRegister(PodResourcesEndpointErrorsListCount)
-		legacyregistry.MustRegister(PodResourcesEndpointErrorsGetAllocatableCount)
-		if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResourcesGet) {
-			legacyregistry.MustRegister(PodResourcesEndpointRequestsGetCount)
-			legacyregistry.MustRegister(PodResourcesEndpointErrorsGetCount)
-		}
-		legacyregistry.MustRegister(StartedPodsTotal)
-		legacyregistry.MustRegister(StartedPodsErrorsTotal)
-		legacyregistry.MustRegister(StartedContainersTotal)
-		legacyregistry.MustRegister(StartedContainersErrorsTotal)
-		legacyregistry.MustRegister(StartedHostProcessContainersTotal)
-		legacyregistry.MustRegister(StartedHostProcessContainersErrorsTotal)
-		legacyregistry.MustRegister(RunPodSandboxDuration)
-		legacyregistry.MustRegister(RunPodSandboxErrors)
-		legacyregistry.MustRegister(CPUManagerPinningRequestsTotal)
-		legacyregistry.MustRegister(CPUManagerPinningErrorsTotal)
-		legacyregistry.MustRegister(CPUManagerSharedPoolSizeMilliCores)
-		legacyregistry.MustRegister(CPUManagerExclusiveCPUsAllocationCount)
-		legacyregistry.MustRegister(CPUManagerAllocationPerNUMA)
-		legacyregistry.MustRegister(ContainerAlignedComputeResources)
-		legacyregistry.MustRegister(ContainerAlignedComputeResourcesFailure)
-		legacyregistry.MustRegister(MemoryManagerPinningRequestTotal)
-		legacyregistry.MustRegister(MemoryManagerPinningErrorsTotal)
-		legacyregistry.MustRegister(TopologyManagerAdmissionRequestsTotal)
-		legacyregistry.MustRegister(TopologyManagerAdmissionErrorsTotal)
-		legacyregistry.MustRegister(TopologyManagerAdmissionDuration)
-		legacyregistry.MustRegister(OrphanPodCleanedVolumes)
-		legacyregistry.MustRegister(OrphanPodCleanedVolumesErrors)
-
-		for _, collector := range collectors {
-			legacyregistry.CustomMustRegister(collector)
-		}
-
-		if utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdown) &&
-			utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdownBasedOnPodPriority) {
-			legacyregistry.MustRegister(GracefulShutdownStartTime)
-			legacyregistry.MustRegister(GracefulShutdownEndTime)
-		}
-
-		legacyregistry.MustRegister(LifecycleHandlerHTTPFallbacks)
-		legacyregistry.MustRegister(LifecycleHandlerSleepTerminated)
-		legacyregistry.MustRegister(CgroupVersion)
-
-		if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
-			legacyregistry.MustRegister(DRAOperationsDuration)
-			legacyregistry.MustRegister(DRAGRPCOperationsDuration)
-		}
-
-		legacyregistry.MustRegister(AdmissionRejectionsTotal)
-
-		if utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
-			legacyregistry.MustRegister(ImageVolumeRequestedTotal)
-			legacyregistry.MustRegister(ImageVolumeMountedSucceedTotal)
-			legacyregistry.MustRegister(ImageVolumeMountedErrorsTotal)
-		}
-	})
-}
-
-// GetGather returns the gatherer. It is used by test cases outside the current package.
-func GetGather() metrics.Gatherer {
-	return legacyregistry.DefaultGatherer
-}
-
-// SinceInSeconds gets the time since the specified start in seconds.
-func SinceInSeconds(start time.Time) float64 {
-	return time.Since(start).Seconds()
-}
-
-// SetNodeName sets the NodeName Gauge to 1.
-func SetNodeName(name types.NodeName) {
-	NodeName.WithLabelValues(string(name)).Set(1)
-}
-
-func GetImageSizeBucket(sizeInBytes uint64) string {
-	if sizeInBytes == 0 {
-		return "N/A"
-	}
-
-	for i := len(imageSizeBuckets) - 1; i >= 0; i-- {
-		if sizeInBytes > imageSizeBuckets[i].lowerBoundInBytes {
-			return imageSizeBuckets[i].label
-		}
-	}
-
-	// unreachable for non-zero sizes, since the first bucket's lower bound is 0
-	return ""
-}
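A quick sketch tying GetImageSizeBucket to ImagePullDuration; imageSize and pullStart are hypothetical variables at an assumed call site:

	// GetImageSizeBucket(0) returns "N/A"; a 3GiB image falls in "1GB-5GB".
	label := GetImageSizeBucket(imageSize)
	ImagePullDuration.WithLabelValues(label).Observe(SinceInSeconds(pullStart))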
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go
deleted file mode 100644
index cdaa42c0a3a..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package cache implements data structures used by the kubelet plugin manager to
-keep track of registered plugins.
-*/
-package cache
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/klog/v2"
-)
-
-// ActualStateOfWorld defines a set of thread-safe operations for the kubelet
-// plugin manager's actual state of the world cache.
-// This cache contains a map of socket file path to plugin information of
-// all plugins attached to this node.
-type ActualStateOfWorld interface {
-
-	// GetRegisteredPlugins generates and returns a list of plugins
-	// that are successfully registered in the current actual state of world.
-	GetRegisteredPlugins() []PluginInfo
-
-	// AddPlugin adds the given plugin to the cache.
-	// An error will be returned if socketPath of the PluginInfo object is empty.
-	// Note that this is different from desired world cache's AddOrUpdatePlugin
-	// because for the actual state of world cache, there won't be a scenario where
-	// we need to update an existing plugin if the timestamps don't match. This is
-	// because the plugin should have been unregistered in the reconciler and therefore
-	// removed from the actual state of world cache first before adding it back into
-	// the actual state of world cache again with the new timestamp
-	AddPlugin(pluginInfo PluginInfo) error
-
-	// RemovePlugin deletes the plugin with the given socket path from the actual
-	// state of world.
-	// If a plugin does not exist with the given socket path, this is a no-op.
-	RemovePlugin(socketPath string)
-
-	// PluginExistsWithCorrectTimestamp checks if the given plugin exists in the current actual
-	// state of world cache with the correct timestamp.
-	// Deprecated: please use `PluginExistsWithCorrectUUID` instead as it provides better
-	// cross-platform support
-	PluginExistsWithCorrectTimestamp(pluginInfo PluginInfo) bool
-
-	// PluginExistsWithCorrectUUID checks if the given plugin exists in the current actual
-	// state of world cache with the correct UUID
-	PluginExistsWithCorrectUUID(pluginInfo PluginInfo) bool
-}
-
-// NewActualStateOfWorld returns a new instance of ActualStateOfWorld
-func NewActualStateOfWorld() ActualStateOfWorld {
-	return &actualStateOfWorld{
-		socketFileToInfo: make(map[string]PluginInfo),
-	}
-}
-
-type actualStateOfWorld struct {
-
-	// socketFileToInfo is a map containing the set of successfully registered plugins
-	// The keys are plugin socket file paths. The values are PluginInfo objects
-	socketFileToInfo map[string]PluginInfo
-	sync.RWMutex
-}
-
-var _ ActualStateOfWorld = &actualStateOfWorld{}
-
-// PluginInfo holds information of a plugin
-type PluginInfo struct {
-	SocketPath string
-	Timestamp  time.Time
-	UUID       types.UID
-	Handler    PluginHandler
-	Name       string
-	Endpoint   string
-}
-
-func (asw *actualStateOfWorld) AddPlugin(pluginInfo PluginInfo) error {
-	asw.Lock()
-	defer asw.Unlock()
-
-	if pluginInfo.SocketPath == "" {
-		return fmt.Errorf("socket path is empty")
-	}
-	if _, ok := asw.socketFileToInfo[pluginInfo.SocketPath]; ok {
-		klog.V(2).InfoS("Plugin exists in actual state cache", "path", pluginInfo.SocketPath)
-	}
-	asw.socketFileToInfo[pluginInfo.SocketPath] = pluginInfo
-	return nil
-}
-
-func (asw *actualStateOfWorld) RemovePlugin(socketPath string) {
-	asw.Lock()
-	defer asw.Unlock()
-
-	delete(asw.socketFileToInfo, socketPath)
-}
-
-func (asw *actualStateOfWorld) GetRegisteredPlugins() []PluginInfo {
-	asw.RLock()
-	defer asw.RUnlock()
-
-	currentPlugins := []PluginInfo{}
-	for _, pluginInfo := range asw.socketFileToInfo {
-		currentPlugins = append(currentPlugins, pluginInfo)
-	}
-	return currentPlugins
-}
-
-func (asw *actualStateOfWorld) PluginExistsWithCorrectTimestamp(pluginInfo PluginInfo) bool {
-	asw.RLock()
-	defer asw.RUnlock()
-
-	// We need to check both if the socket file path exists, and the timestamp
-	// matches the given plugin (from the desired state cache) timestamp
-	actualStatePlugin, exists := asw.socketFileToInfo[pluginInfo.SocketPath]
-	return exists && (actualStatePlugin.Timestamp == pluginInfo.Timestamp)
-}
-
-func (asw *actualStateOfWorld) PluginExistsWithCorrectUUID(pluginInfo PluginInfo) bool {
-	asw.RLock()
-	defer asw.RUnlock()
-
-	// We need to check both if the socket file path exists, and the UUID
-	// matches the given plugin (from the desired state cache) UUID
-	actualStatePlugin, exists := asw.socketFileToInfo[pluginInfo.SocketPath]
-	return exists && (actualStatePlugin.UUID == pluginInfo.UUID)
-}
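A minimal usage sketch of this cache; the socket path is hypothetical and uuid comes from k8s.io/apimachinery/pkg/util/uuid:

	asw := cache.NewActualStateOfWorld()
	info := cache.PluginInfo{
		SocketPath: "/var/lib/kubelet/plugins_registry/driver.sock", // hypothetical path
		Timestamp:  time.Now(),
		UUID:       uuid.NewUUID(),
	}
	if err := asw.AddPlugin(info); err != nil {
		klog.ErrorS(err, "failed to add plugin")
	}
	registered := asw.GetRegisteredPlugins() // snapshot of everything registered
	_ = registered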
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
deleted file mode 100644
index 90e38ddd736..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/desired_state_of_world.go
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package cache implements data structures used by the kubelet plugin manager to
-keep track of registered plugins.
-*/
-package cache
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/klog/v2"
-)
-
-// DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
-// plugin manager's desired state of the world cache.
-// This cache contains a map of socket file path to plugin information of
-// all plugins attached to this node.
-type DesiredStateOfWorld interface {
-	// AddOrUpdatePlugin adds the given plugin to the cache if it doesn't already exist.
-	// If it does exist in the cache, then the timestamp of the PluginInfo object in the cache will be updated.
-	// An error will be returned if socketPath is empty.
-	AddOrUpdatePlugin(socketPath string) error
-
-	// RemovePlugin deletes the plugin with the given socket path from the desired
-	// state of world.
-	// If a plugin does not exist with the given socket path, this is a no-op.
-	RemovePlugin(socketPath string)
-
-	// GetPluginsToRegister generates and returns a list of plugins
-	// in the current desired state of world.
-	GetPluginsToRegister() []PluginInfo
-
-	// PluginExists checks if the given socket path exists in the current desired
-	// state of world cache
-	PluginExists(socketPath string) bool
-}
-
-// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
-func NewDesiredStateOfWorld() DesiredStateOfWorld {
-	return &desiredStateOfWorld{
-		socketFileToInfo: make(map[string]PluginInfo),
-	}
-}
-
-type desiredStateOfWorld struct {
-
-	// socketFileToInfo is a map containing the set of successfully registered plugins
-	// The keys are plugin socket file paths. The values are PluginInfo objects
-	socketFileToInfo map[string]PluginInfo
-	sync.RWMutex
-}
-
-var _ DesiredStateOfWorld = &desiredStateOfWorld{}
-
-// Generate a detailed error msg for logs
-func generatePluginMsgDetailed(prefixMsg, suffixMsg, socketPath, details string) (detailedMsg string) {
-	return fmt.Sprintf("%v for plugin at %q %v %v", prefixMsg, socketPath, details, suffixMsg)
-}
-
-// Generate a simplified error msg for events and a detailed error msg for logs
-func generatePluginMsg(prefixMsg, suffixMsg, socketPath, details string) (simpleMsg, detailedMsg string) {
-	simpleMsg = fmt.Sprintf("%v for plugin at %q %v", prefixMsg, socketPath, suffixMsg)
-	return simpleMsg, generatePluginMsgDetailed(prefixMsg, suffixMsg, socketPath, details)
-}
-
-// GenerateMsgDetailed returns detailed msgs for plugins to register
-// that can be used in logs.
-// The msg format follows the pattern "<prefixMsg> <plugin details> <suffixMsg>"
-func (plugin *PluginInfo) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
-	detailedStr := fmt.Sprintf("(plugin details: %v)", plugin)
-	return generatePluginMsgDetailed(prefixMsg, suffixMsg, plugin.SocketPath, detailedStr)
-}
-
-// GenerateMsg returns simple and detailed msgs for plugins to register
-// that is user friendly and a detailed msg that can be used in logs.
-// The msg format follows the pattern "<prefixMsg> <plugin details> <suffixMsg>".
-func (plugin *PluginInfo) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
-	detailedStr := fmt.Sprintf("(plugin details: %v)", plugin)
-	return generatePluginMsg(prefixMsg, suffixMsg, plugin.SocketPath, detailedStr)
-}
-
-// GenerateErrorDetailed returns detailed errors for plugins to register
-// that can be used in logs.
-// The msg format follows the pattern "<prefixMsg> <plugin details>: <err>",
-func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return errors.New(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
-}
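To make the message helpers concrete, a hedged sketch; the prefix string and the error are illustrative, and the exact spacing follows the Sprintf formats above:

	// For a plugin at "/run/foo.sock" this yields roughly:
	//   detailedErr: Registering for plugin at "/run/foo.sock" (plugin details: &{...}) : timed out
	detailedErr := pluginInfo.GenerateErrorDetailed("Registering", fmt.Errorf("timed out"))
	_ = detailedErr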
-// GenerateError returns simple and detailed errors for plugins to register
-// that is user friendly and a detailed error that can be used in logs.
-// The msg format follows the pattern "<prefixMsg> <plugin details>: <err>".
-func (plugin *PluginInfo) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
-	simpleMsg, detailedMsg := plugin.GenerateMsg(prefixMsg, errSuffix(err))
-	return errors.New(simpleMsg), errors.New(detailedMsg)
-}
-
-// Generates an error string with the format ": <err>" if err exists
-func errSuffix(err error) string {
-	errStr := ""
-	if err != nil {
-		errStr = fmt.Sprintf(": %v", err)
-	}
-	return errStr
-}
-
-func (dsw *desiredStateOfWorld) AddOrUpdatePlugin(socketPath string) error {
-	dsw.Lock()
-	defer dsw.Unlock()
-
-	if socketPath == "" {
-		return fmt.Errorf("socket path is empty")
-	}
-	if _, ok := dsw.socketFileToInfo[socketPath]; ok {
-		klog.V(2).InfoS("Plugin exists in desired state cache, timestamp will be updated", "path", socketPath)
-	}
-
-	// Update the PluginInfo object.
-	// Note that we only update the timestamp and UUID in the desired state of world, not the actual state of world
-	// because in the reconciler, we need to check if the plugin in the actual state of world is the same
-	// version as the plugin in the desired state of world
-	dsw.socketFileToInfo[socketPath] = PluginInfo{
-		SocketPath: socketPath,
-		Timestamp:  time.Now(),
-		UUID:       uuid.NewUUID(),
-	}
-	return nil
-}
-
-func (dsw *desiredStateOfWorld) RemovePlugin(socketPath string) {
-	dsw.Lock()
-	defer dsw.Unlock()
-
-	delete(dsw.socketFileToInfo, socketPath)
-}
-
-func (dsw *desiredStateOfWorld) GetPluginsToRegister() []PluginInfo {
-	dsw.RLock()
-	defer dsw.RUnlock()
-
-	pluginsToRegister := []PluginInfo{}
-	for _, pluginInfo := range dsw.socketFileToInfo {
-		pluginsToRegister = append(pluginsToRegister, pluginInfo)
-	}
-	return pluginsToRegister
-}
-
-func (dsw *desiredStateOfWorld) PluginExists(socketPath string) bool {
-	dsw.RLock()
-	defer dsw.RUnlock()
-
-	_, exists := dsw.socketFileToInfo[socketPath]
-	return exists
-}
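A companion sketch for the desired-state side; the socket path is hypothetical:

	dsw := cache.NewDesiredStateOfWorld()
	// Observing a new socket records it with a fresh timestamp and UUID.
	if err := dsw.AddOrUpdatePlugin("/var/lib/kubelet/plugins_registry/driver.sock"); err != nil {
		klog.ErrorS(err, "failed to record plugin")
	}
	for _, p := range dsw.GetPluginsToRegister() {
		// A reconciler would register p unless the actual state cache already
		// holds it with the same UUID (PluginExistsWithCorrectUUID above).
		_ = p
	}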
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go
deleted file mode 100644
index bcffd117ef2..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/types.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cache
-
-import "time"
-
-// PluginHandler is an interface a client of the pluginwatcher API needs to implement in
-// order to consume plugins
-// The PluginHandler follows the simple following state machine:
-//
-//                        +--------------------------------------+
-//                        |            ReRegistration            |
-//                        | Socket created with same plugin name |
-//                        |                                      |
-//                        |                                      |
-//   Socket Created       v                                      + Socket Deleted
-// +------------------> Validate +---------------------------> Register +------------------> DeRegister
-//                          +                                      +                             +
-//                          |                                      |                             |
-//                          | Error                                | Error                       |
-//                          |                                      |                             |
-//                          v                                      v                             v
-//                         Out                                    Out                           Out
-//
-// The pluginwatcher module follows strictly and sequentially this state machine for each *plugin name*.
-// e.g: If you are Registering a plugin foo, you cannot get a DeRegister call for plugin foo
-// until the Register("foo") call returns. Nor will you get a Validate("foo", "Different endpoint", ...)
-// call until the Register("foo") call returns.
-//
-// ReRegistration: Socket created with same plugin name, usually for a plugin update
-// e.g: plugin with name foo registers at foo.com/foo-1.9.7 later a plugin with name foo
-// registers at foo.com/foo-1.9.9
-//
-// DeRegistration: When ReRegistration happens only the deletion of the new socket will trigger a DeRegister call
-type PluginHandler interface {
-	// Validate returns an error if the information provided by
-	// the potential plugin is erroneous (unsupported version, ...)
-	ValidatePlugin(pluginName string, endpoint string, versions []string) error
-	// RegisterPlugin is called so that the plugin can be registered by any
-	// plugin consumer
-	// Error encountered here can still be notified to the plugin.
-	RegisterPlugin(pluginName, endpoint string, versions []string, pluginClientTimeout *time.Duration) error
-	// DeRegisterPlugin is called once the pluginwatcher observes that the socket has
-	// been deleted.
-	DeRegisterPlugin(pluginName, endpoint string)
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go
deleted file mode 100644
index acd561b216c..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package qos contains helper functions for quality of service.
-// For each resource (memory, CPU) Kubelet supports three classes of containers.
-// Memory guaranteed containers will receive the highest priority and will get all the resources
-// they need.
-// Burstable containers will be guaranteed their request and can "burst" and use more resources
-// when available.
-// Best-Effort containers, which don't specify a request, can use resources only if not being used
-// by other pods.
-package qos
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/helpers.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/helpers.go
deleted file mode 100644
index 33b6c166b7f..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/helpers.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-Copyright 2024 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package qos contains helper functions for quality of service.
-// For each resource (memory, CPU) Kubelet supports three classes of containers. -// Memory guaranteed containers will receive the highest priority and will get all the resources -// they need. -// Burstable containers will be guaranteed their request and can "burst" and use more resources -// when available. -// Best-Effort containers, which don't specify a request, can use resources only if not being used -// by other pods. - -package qos - -import ( - v1 "k8s.io/api/core/v1" - resourcehelper "k8s.io/component-helpers/resource" -) - -// minRegularContainerMemory returns the minimum memory resource quantity -// across all regular containers in pod.Spec.Containers. -// It does not include initContainers (both restartable and non-restartable). -func minRegularContainerMemory(pod v1.Pod) int64 { - memoryValue := pod.Spec.Containers[0].Resources.Requests.Memory().Value() - for _, container := range pod.Spec.Containers[1:] { - if container.Resources.Requests.Memory().Value() < memoryValue { - memoryValue = container.Resources.Requests.Memory().Value() - } - } - return memoryValue -} - -// remainingPodMemReqPerContainer calculates the remaining pod memory request per -// container by: -// 1. Taking the total pod memory requests -// 2. Subtracting total container memory requests from pod memory requests -// 3. Dividing the remainder by the number of containers. -// This gives us the additional memory request that is not allocated to any -// containers in the pod. This value will be divided equally among all containers to -// calculate oom score adjusment. -// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#oom-score-adjustment -// for more details. -func remainingPodMemReqPerContainer(pod *v1.Pod) int64 { - var remainingMemory int64 - if pod.Spec.Resources.Requests.Memory().IsZero() { - return remainingMemory - } - - numContainers := len(pod.Spec.Containers) + len(pod.Spec.InitContainers) - - // Aggregated requests of all containers. - aggrContainerReqs := resourcehelper.AggregateContainerRequests(pod, resourcehelper.PodResourcesOptions{}) - - remainingMemory = pod.Spec.Resources.Requests.Memory().Value() - aggrContainerReqs.Memory().Value() - - remainingMemoryPerContainer := remainingMemory / int64(numContainers) - return remainingMemoryPerContainer -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go deleted file mode 100644 index a70945ed217..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package qos - -import ( - v1 "k8s.io/api/core/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - resourcehelper "k8s.io/component-helpers/resource" - v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/types" -) - -const ( - // KubeletOOMScoreAdj is the OOM score adjustment for Kubelet - KubeletOOMScoreAdj int = -999 - // KubeProxyOOMScoreAdj is the OOM score adjustment for kube-proxy - KubeProxyOOMScoreAdj int = -999 - guaranteedOOMScoreAdj int = -997 - besteffortOOMScoreAdj int = 1000 -) - -// GetContainerOOMScoreAdjust returns the amount by which the OOM score of all processes in the -// container should be adjusted. -// The OOM score of a process is the percentage of memory it consumes -// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000 -// and 1000. Containers with higher OOM scores are killed if the system runs out of memory. -// See https://lwn.net/Articles/391222/ for more information. -// OOMScoreAdjust should be calculated based on the allocated resources, so the pod argument should -// contain the allocated resources in the spec. -func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int { - if types.IsNodeCriticalPod(pod) { - // Only node critical pod should be the last to get killed. - return guaranteedOOMScoreAdj - } - - switch v1qos.GetPodQOS(pod) { - case v1.PodQOSGuaranteed: - // Guaranteed containers should be the last to get killed. - return guaranteedOOMScoreAdj - case v1.PodQOSBestEffort: - return besteffortOOMScoreAdj - } - - // Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally, - // we want to protect Burstable containers that consume less memory than requested. - // The formula below is a heuristic. A container requesting for 10% of a system's - // memory will have an OOM score adjust of 900. If a process in container Y - // uses over 10% of memory, its OOM score will be 1000. The idea is that containers - // which use more than their request will have an OOM score of 1000 and will be prime - // targets for OOM kills. - // Note that this is a heuristic, it won't work if a container has many small processes. - containerMemReq := container.Resources.Requests.Memory().Value() - - var oomScoreAdjust, remainingReqPerContainer int64 - // When PodLevelResources feature is enabled, the OOM score adjustment formula is modified - // to account for pod-level memory requests. Any extra pod memory request that's - // not allocated to the containers is divided equally among all containers and - // added to their individual memory requests when calculating the OOM score - // adjustment. Otherwise, only container-level memory requests are used. See - // https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#oom-score-adjustment - // for more details. - if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && - resourcehelper.IsPodLevelRequestsSet(pod) { - // TODO(ndixita): Refactor to use this formula in all cases, as - // remainingReqPerContainer will be 0 when pod-level resources are not set. 
- remainingReqPerContainer = remainingPodMemReqPerContainer(pod) - oomScoreAdjust = 1000 - (1000 * (containerMemReq + remainingReqPerContainer) / memoryCapacity) - } else { - oomScoreAdjust = 1000 - (1000*containerMemReq)/memoryCapacity - } - - // adapt the sidecarContainer memoryRequest for OOM ADJ calculation - // calculate the oom score adjustment based on: max-memory( currentSideCarContainer , min-memory(regular containers) ) . - if isSidecarContainer(pod, container) { - // check min memory quantity in regular containers - minMemoryRequest := minRegularContainerMemory(*pod) - - // When calculating minMemoryOomScoreAdjust for sidecar containers with PodLevelResources enabled, - // we add the per-container share of unallocated pod memory requests to the minimum memory request. - // This ensures the OOM score adjustment i.e. minMemoryOomScoreAdjust - // calculation remains consistent - // with how we handle pod-level memory requests for regular containers. - if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && - resourcehelper.IsPodLevelRequestsSet(pod) { - minMemoryRequest += remainingReqPerContainer - } - minMemoryOomScoreAdjust := 1000 - (1000*minMemoryRequest)/memoryCapacity - // the OOM adjustment for sidecar container will match - // or fall below the OOM score adjustment of regular containers in the Pod. - if oomScoreAdjust > minMemoryOomScoreAdjust { - oomScoreAdjust = minMemoryOomScoreAdjust - } - } - - // A guaranteed pod using 100% of memory can have an OOM score of 10. Ensure - // that burstable pods have a higher OOM score adjustment. - if int(oomScoreAdjust) < (1000 + guaranteedOOMScoreAdj) { - return (1000 + guaranteedOOMScoreAdj) - } - // Give burstable pods a higher chance of survival over besteffort pods. - if int(oomScoreAdjust) == besteffortOOMScoreAdj { - return int(oomScoreAdjust - 1) - } - return int(oomScoreAdjust) -} - -// isSidecarContainer returns a boolean indicating whether a container is a sidecar or not. -// Since v1.Container does not directly specify whether a container is a sidecar, -// this function uses available indicators (container.RestartPolicy == v1.ContainerRestartPolicyAlways) -// to make that determination. -func isSidecarContainer(pod *v1.Pod, container *v1.Container) bool { - if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { - for _, initContainer := range pod.Spec.InitContainers { - if initContainer.Name == container.Name { - return true - } - } - } - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/pidlimit.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/pidlimit.go deleted file mode 100644 index 7356fa9c416..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/pidlimit.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
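// A worked numeric sketch of the burstable OOM-score heuristic deleted above,
// with the Kubernetes API types replaced by plain integers. The 10 GiB node
// and the request sizes are illustrative assumptions, not values from this
// diff; only the formula and clamping mirror the removed code.
package main

import "fmt"

const guaranteedOOMScoreAdj = -997

// burstableOOMScoreAdjust mirrors the core formula above:
// 1000 - (1000 * memoryRequest) / memoryCapacity, clamped so a burstable
// container always scores above guaranteed pods and below best-effort ones.
func burstableOOMScoreAdjust(memRequestBytes, memCapacityBytes int64) int {
	adj := 1000 - (1000*memRequestBytes)/memCapacityBytes
	if int(adj) < 1000+guaranteedOOMScoreAdj {
		return 1000 + guaranteedOOMScoreAdj // 3: still above guaranteed pods
	}
	if int(adj) == 1000 { // the best-effort value
		return 999 // keep burstable just below best-effort
	}
	return int(adj)
}

func main() {
	capacity := int64(10) << 30 // assume a 10 GiB node
	// A container requesting 10% of memory scores 900, as the deleted
	// comment describes.
	fmt.Println(burstableOOMScoreAdjust(capacity/10, capacity)) // 900
	// A container requesting nearly all memory is clamped to 3.
	fmt.Println(burstableOOMScoreAdjust(capacity-1, capacity)) // 3
}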
-*/ - -package pidlimit - -import ( - "k8s.io/api/core/v1" -) - -const ( - // PIDs is the (internal) name for this resource - PIDs v1.ResourceName = "pid" -) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/pidlimit_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/pidlimit_linux.go deleted file mode 100644 index 25e76552032..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/stats/pidlimit/pidlimit_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -//go:build linux -// +build linux - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pidlimit - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" -) - -// Stats provides basic information about max and current process count -func Stats() (*statsapi.RlimitStats, error) { - rlimit := &statsapi.RlimitStats{} - - taskMax := int64(-1) - // Calculate the minimum of kernel.pid_max and kernel.threads-max as they both specify the - // system-wide limit on the number of tasks. - for _, file := range []string{"/proc/sys/kernel/pid_max", "/proc/sys/kernel/threads-max"} { - if content, err := os.ReadFile(file); err == nil { - if limit, err := strconv.ParseInt(string(content[:len(content)-1]), 10, 64); err == nil { - if taskMax == -1 || taskMax > limit { - taskMax = limit - } - } - } - } - // Both reads did not fail. - if taskMax >= 0 { - rlimit.MaxPID = &taskMax - } - - // Prefer to read "/proc/loadavg" when possible because sysinfo(2) - // returns truncated number when greater than 65538. 
See - // https://github.com/kubernetes/kubernetes/issues/107107 - if procs, err := runningTaskCount(); err == nil { - rlimit.NumOfRunningProcesses = &procs - } else { - var info syscall.Sysinfo_t - syscall.Sysinfo(&info) - procs := int64(info.Procs) - rlimit.NumOfRunningProcesses = &procs - } - - rlimit.Time = v1.NewTime(time.Now()) - - return rlimit, nil -} - -func runningTaskCount() (int64, error) { - // Example: 1.36 3.49 4.53 2/3518 3715089 - bytes, err := os.ReadFile("/proc/loadavg") - if err != nil { - return 0, err - } - fields := strings.Fields(string(bytes)) - if len(fields) < 5 { - return 0, fmt.Errorf("not enough fields in /proc/loadavg") - } - subfields := strings.Split(fields[3], "/") - if len(subfields) != 2 { - return 0, fmt.Errorf("error parsing fourth field of /proc/loadavg") - } - return strconv.ParseInt(subfields[1], 10, 64) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/.mockery.yaml b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/.mockery.yaml deleted file mode 100644 index e57d22e2ee2..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/.mockery.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -dir: testing -filename: "mock_{{.InterfaceName | snakecase}}.go" -boilerplate-file: ../../../hack/boilerplate/boilerplate.generatego.txt -outpkg: testing -with-expecter: true -packages: - k8s.io/kubernetes/pkg/kubelet/status: - interfaces: - PodStatusProvider: diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go deleted file mode 100644 index d519bf4c703..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go +++ /dev/null @@ -1,302 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "fmt" - "strings" - - v1 "k8s.io/api/core/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util" -) - -const ( - // UnknownContainerStatuses says that all container statuses are unknown. - UnknownContainerStatuses = "UnknownContainerStatuses" - // PodCompleted says that all related containers have succeeded. - PodCompleted = "PodCompleted" - // PodFailed says that the pod has failed and as such the containers have failed. - PodFailed = "PodFailed" - // ContainersNotReady says that one or more containers are not ready. - ContainersNotReady = "ContainersNotReady" - // ContainersNotInitialized says that one or more init containers have not succeeded. - ContainersNotInitialized = "ContainersNotInitialized" - // ReadinessGatesNotReady says that one or more pod readiness gates are not ready. - ReadinessGatesNotReady = "ReadinessGatesNotReady" -) - -// GenerateContainersReadyCondition returns the status of "ContainersReady" condition. -// The status of "ContainersReady" condition is true when all containers are ready. 
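// A self-contained sketch of the /proc/loadavg parsing that the deleted
// runningTaskCount helper above performs: the fourth field is
// "running/total", and the total is the task count it extracts. The sample
// line is the one quoted in the removed code.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func totalTasks(loadavg string) (int64, error) {
	fields := strings.Fields(loadavg)
	if len(fields) < 5 {
		return 0, fmt.Errorf("not enough fields in %q", loadavg)
	}
	parts := strings.Split(fields[3], "/")
	if len(parts) != 2 {
		return 0, fmt.Errorf("unexpected fourth field %q", fields[3])
	}
	return strconv.ParseInt(parts[1], 10, 64)
}

func main() {
	n, err := totalTasks("1.36 3.49 4.53 2/3518 3715089")
	fmt.Println(n, err) // 3518 <nil>
}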
-func GenerateContainersReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition { - // Find if all containers are ready or not. - if containerStatuses == nil { - return v1.PodCondition{ - Type: v1.ContainersReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady), - Status: v1.ConditionFalse, - Reason: UnknownContainerStatuses, - } - } - unknownContainers := []string{} - unreadyContainers := []string{} - - for _, container := range pod.Spec.InitContainers { - if !podutil.IsRestartableInitContainer(&container) { - continue - } - - if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok { - if !containerStatus.Ready { - unreadyContainers = append(unreadyContainers, container.Name) - } - } else { - unknownContainers = append(unknownContainers, container.Name) - } - } - - for _, container := range pod.Spec.Containers { - if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok { - if !containerStatus.Ready { - unreadyContainers = append(unreadyContainers, container.Name) - } - } else { - unknownContainers = append(unknownContainers, container.Name) - } - } - - // If all containers are known and succeeded, just return PodCompleted. - if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 { - return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase) - } - - // If the pod phase is failed, explicitly set the ready condition to false for containers since they may be in progress of terminating. - if podPhase == v1.PodFailed { - return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase) - } - - // Generate message for containers in unknown condition. - unreadyMessages := []string{} - if len(unknownContainers) > 0 { - unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers)) - } - if len(unreadyContainers) > 0 { - unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unready status: %s", unreadyContainers)) - } - unreadyMessage := strings.Join(unreadyMessages, ", ") - if unreadyMessage != "" { - return v1.PodCondition{ - Type: v1.ContainersReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady), - Status: v1.ConditionFalse, - Reason: ContainersNotReady, - Message: unreadyMessage, - } - } - - return v1.PodCondition{ - Type: v1.ContainersReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady), - Status: v1.ConditionTrue, - } -} - -// GeneratePodReadyCondition returns "Ready" condition of a pod. -// The status of "Ready" condition is "True", if all containers in a pod are ready -// AND all matching conditions specified in the ReadinessGates have status equal to "True". -func GeneratePodReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition { - containersReady := GenerateContainersReadyCondition(pod, oldPodStatus, containerStatuses, podPhase) - // If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady. 
- if containersReady.Status != v1.ConditionTrue { - return v1.PodCondition{ - Type: v1.PodReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady), - Status: containersReady.Status, - Reason: containersReady.Reason, - Message: containersReady.Message, - } - } - - // Evaluate corresponding conditions specified in readiness gate - // Generate message if any readiness gate is not satisfied. - unreadyMessages := []string{} - for _, rg := range pod.Spec.ReadinessGates { - _, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType) - if c == nil { - unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType))) - } else if c.Status != v1.ConditionTrue { - unreadyMessages = append(unreadyMessages, fmt.Sprintf("the status of pod readiness gate %q is not \"True\", but %v", string(rg.ConditionType), c.Status)) - } - } - - // Set "Ready" condition to "False" if any readiness gate is not ready. - if len(unreadyMessages) != 0 { - unreadyMessage := strings.Join(unreadyMessages, ", ") - return v1.PodCondition{ - Type: v1.PodReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady), - Status: v1.ConditionFalse, - Reason: ReadinessGatesNotReady, - Message: unreadyMessage, - } - } - - return v1.PodCondition{ - Type: v1.PodReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady), - Status: v1.ConditionTrue, - } -} - -func isInitContainerInitialized(initContainer *v1.Container, containerStatus *v1.ContainerStatus) bool { - if podutil.IsRestartableInitContainer(initContainer) { - if containerStatus.Started == nil || !*containerStatus.Started { - return false - } - } else { // regular init container - if !containerStatus.Ready { - return false - } - } - return true -} - -// GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it -// returns an uninitialized condition. -func GeneratePodInitializedCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition { - // Find if all containers are ready or not. - if containerStatuses == nil && len(pod.Spec.InitContainers) > 0 { - return v1.PodCondition{ - Type: v1.PodInitialized, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized), - Status: v1.ConditionFalse, - Reason: UnknownContainerStatuses, - } - } - - unknownContainers := []string{} - incompleteContainers := []string{} - for _, container := range pod.Spec.InitContainers { - containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name) - if !ok { - unknownContainers = append(unknownContainers, container.Name) - continue - } - if !isInitContainerInitialized(&container, &containerStatus) { - incompleteContainers = append(incompleteContainers, container.Name) - } - } - - // If all init containers are known and succeeded, just return PodCompleted. 
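// A reduced sketch of the readiness-gate evaluation above, with the pod and
// condition types flattened to plain structs. The gate name is an invented
// placeholder; the two failure messages follow the deleted code.
package main

import "fmt"

type condition struct {
	Type   string
	Status string // "True", "False", or "Unknown"
}

// unreadyGateMessages mirrors the deleted loop: every readiness gate must
// have a matching condition with status "True"; anything else collects a
// message that ends up on the Pod's "Ready" condition.
func unreadyGateMessages(gates []string, conditions []condition) []string {
	byType := map[string]condition{}
	for _, c := range conditions {
		byType[c.Type] = c
	}
	var msgs []string
	for _, g := range gates {
		c, ok := byType[g]
		switch {
		case !ok:
			msgs = append(msgs, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", g))
		case c.Status != "True":
			msgs = append(msgs, fmt.Sprintf("the status of pod readiness gate %q is not \"True\", but %v", g, c.Status))
		}
	}
	return msgs
}

func main() {
	gates := []string{"example.com/feature-ready"}
	fmt.Println(unreadyGateMessages(gates, nil))
	fmt.Println(unreadyGateMessages(gates, []condition{{"example.com/feature-ready", "True"}})) // []
}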
- if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 { - return v1.PodCondition{ - Type: v1.PodInitialized, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized), - Status: v1.ConditionTrue, - Reason: PodCompleted, - } - } - - // If there is any regular container that has started, then the pod has - // been initialized before. - // This is needed to handle the case where the pod has been initialized but - // the restartable init containers are restarting. - if kubecontainer.HasAnyRegularContainerStarted(&pod.Spec, containerStatuses) { - return v1.PodCondition{ - Type: v1.PodInitialized, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized), - Status: v1.ConditionTrue, - } - } - - unreadyMessages := make([]string, 0, len(unknownContainers)+len(incompleteContainers)) - if len(unknownContainers) > 0 { - unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers)) - } - if len(incompleteContainers) > 0 { - unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with incomplete status: %s", incompleteContainers)) - } - unreadyMessage := strings.Join(unreadyMessages, ", ") - if unreadyMessage != "" { - return v1.PodCondition{ - Type: v1.PodInitialized, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized), - Status: v1.ConditionFalse, - Reason: ContainersNotInitialized, - Message: unreadyMessage, - } - } - - return v1.PodCondition{ - Type: v1.PodInitialized, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized), - Status: v1.ConditionTrue, - } -} - -func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, podStatus *kubecontainer.PodStatus) v1.PodCondition { - newSandboxNeeded, _, _ := runtimeutil.PodSandboxChanged(pod, podStatus) - // if a new sandbox does not need to be created for a pod, it indicates that - // a sandbox for the pod with networking configured already exists. - // Otherwise, the kubelet needs to invoke the container runtime to create a - // fresh sandbox and configure networking for the sandbox. 
- if !newSandboxNeeded { - return v1.PodCondition{ - Type: v1.PodReadyToStartContainers, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers), - Status: v1.ConditionTrue, - } - } - return v1.PodCondition{ - Type: v1.PodReadyToStartContainers, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers), - Status: v1.ConditionFalse, - } -} - -func generateContainersReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition { - condition := v1.PodCondition{ - Type: v1.ContainersReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady), - Status: v1.ConditionFalse, - } - - if podPhase == v1.PodFailed { - condition.Reason = PodFailed - } else if podPhase == v1.PodSucceeded { - condition.Reason = PodCompleted - } - - return condition -} - -func generatePodReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition { - condition := v1.PodCondition{ - Type: v1.PodReady, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady), - Status: v1.ConditionFalse, - } - - if podPhase == v1.PodFailed { - condition.Reason = PodFailed - } else if podPhase == v1.PodSucceeded { - condition.Reason = PodCompleted - } - - return condition -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go deleted file mode 100644 index c86bef17ff3..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go +++ /dev/null @@ -1,1195 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//go:generate mockery -package status - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - "time" - - "github.com/google/go-cmp/cmp" //nolint:depguard - clientset "k8s.io/client-go/kubernetes" - - v1 "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/features" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/metrics" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - kubeutil "k8s.io/kubernetes/pkg/kubelet/util" - statusutil "k8s.io/kubernetes/pkg/util/pod" -) - -// A wrapper around v1.PodStatus that includes a version to enforce that stale pod statuses are -// not sent to the API server. -type versionedPodStatus struct { - // version is a monotonically increasing version number (per pod). 
- version uint64 - // Pod name & namespace, for sending updates to API server. - podName string - podNamespace string - // at is the time at which the most recent status update was detected - at time.Time - - // True if the status is generated at the end of SyncTerminatedPod, or after it is completed. - podIsFinished bool - - status v1.PodStatus -} - -// Updates pod statuses in apiserver. Writes only when new status has changed. -// All methods are thread-safe. -type manager struct { - kubeClient clientset.Interface - podManager PodManager - // Map from pod UID to sync status of the corresponding pod. - podStatuses map[types.UID]versionedPodStatus - podResizeConditions map[types.UID]podResizeConditions - podStatusesLock sync.RWMutex - podStatusChannel chan struct{} - // Map from (mirror) pod UID to latest status version successfully sent to the API server. - // apiStatusVersions must only be accessed from the sync thread. - apiStatusVersions map[kubetypes.MirrorPodUID]uint64 - podDeletionSafety PodDeletionSafetyProvider - - podStartupLatencyHelper PodStartupLatencyStateHelper -} - -type podResizeConditions struct { - PodResizePending *v1.PodCondition - PodResizeInProgress *v1.PodCondition -} - -func (prc podResizeConditions) List() []*v1.PodCondition { - var conditions []*v1.PodCondition - if prc.PodResizePending != nil { - conditions = append(conditions, prc.PodResizePending) - } - if prc.PodResizeInProgress != nil { - conditions = append(conditions, prc.PodResizeInProgress) - } - return conditions -} - -// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet. -// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc. -type PodManager interface { - GetPodByUID(types.UID) (*v1.Pod, bool) - GetMirrorPodByPod(*v1.Pod) (*v1.Pod, bool) - TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID - GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID) -} - -// PodStatusProvider knows how to provide status for a pod. It is intended to be used by other components -// that need to introspect the authoritative status of a pod. The PodStatusProvider represents the actual -// status of a running pod as the kubelet sees it. -type PodStatusProvider interface { - // GetPodStatus returns the cached status for the provided pod UID, as well as whether it - // was a cache hit. - GetPodStatus(uid types.UID) (v1.PodStatus, bool) -} - -// PodDeletionSafetyProvider provides guarantees that a pod can be safely deleted. -type PodDeletionSafetyProvider interface { - // PodCouldHaveRunningContainers returns true if the pod could have running containers. - PodCouldHaveRunningContainers(pod *v1.Pod) bool -} - -type PodStartupLatencyStateHelper interface { - RecordStatusUpdated(pod *v1.Pod) - DeletePodStartupState(podUID types.UID) -} - -// Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with -// the latest v1.PodStatus. It also syncs updates back to the API server. -type Manager interface { - PodStatusProvider - - // Start the API server status sync loop. - Start() - - // SetPodStatus caches updates the cached status for the given pod, and triggers a status update. - SetPodStatus(pod *v1.Pod, status v1.PodStatus) - - // SetContainerReadiness updates the cached container status with the given readiness, and - // triggers a status update. 
- SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool) - - // SetContainerStartup updates the cached container status with the given startup, and - // triggers a status update. - SetContainerStartup(podUID types.UID, containerID kubecontainer.ContainerID, started bool) - - // TerminatePod resets the container status for the provided pod to terminated and triggers - // a status update. - TerminatePod(pod *v1.Pod) - - // RemoveOrphanedStatuses scans the status cache and removes any entries for pods not included in - // the provided podUIDs. - RemoveOrphanedStatuses(podUIDs map[types.UID]bool) - - // GetPodResizeConditions returns cached PodStatus Resize conditions value - GetPodResizeConditions(podUID types.UID) []*v1.PodCondition - - // SetPodResizePendingCondition caches the last PodResizePending condition for the pod. - SetPodResizePendingCondition(podUID types.UID, reason, message string) - - // SetPodResizeInProgressCondition caches the last PodResizeInProgress condition for the pod. - SetPodResizeInProgressCondition(podUID types.UID, reason, message string, allowReasonToBeCleared bool) - - // ClearPodResizePendingCondition clears the PodResizePending condition for the pod from the cache. - ClearPodResizePendingCondition(podUID types.UID) - - // ClearPodResizeInProgressCondition clears the PodResizeInProgress condition for the pod from the cache. - ClearPodResizeInProgressCondition(podUID types.UID) -} - -const syncPeriod = 10 * time.Second - -// NewManager returns a functional Manager. -func NewManager(kubeClient clientset.Interface, podManager PodManager, podDeletionSafety PodDeletionSafetyProvider, podStartupLatencyHelper PodStartupLatencyStateHelper) Manager { - return &manager{ - kubeClient: kubeClient, - podManager: podManager, - podStatuses: make(map[types.UID]versionedPodStatus), - podResizeConditions: make(map[types.UID]podResizeConditions), - podStatusChannel: make(chan struct{}, 1), - apiStatusVersions: make(map[kubetypes.MirrorPodUID]uint64), - podDeletionSafety: podDeletionSafety, - podStartupLatencyHelper: podStartupLatencyHelper, - } -} - -// isPodStatusByKubeletEqual returns true if the given pod statuses are equal when non-kubelet-owned -// pod conditions are excluded. -// This method normalizes the status before comparing so as to make sure that meaningless -// changes will be ignored. 
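// A stripped-down sketch of the versioned-status bookkeeping the manager
// above is built on: each cached status carries a monotonically increasing
// per-pod version, and a pod needs a sync whenever the version last
// acknowledged by the API server lags behind. Types here are simplified
// stand-ins, not the real versionedPodStatus.
package main

import "fmt"

type versionedStatus struct {
	version uint64
	phase   string
}

type statusCache struct {
	statuses    map[string]versionedStatus // pod UID -> latest local status
	apiVersions map[string]uint64          // pod UID -> version the API server has seen
}

func (c *statusCache) set(uid, phase string) {
	v := c.statuses[uid]
	c.statuses[uid] = versionedStatus{version: v.version + 1, phase: phase}
}

func (c *statusCache) needsUpdate(uid string) bool {
	latest, ok := c.apiVersions[uid]
	return !ok || latest < c.statuses[uid].version
}

func main() {
	c := &statusCache{statuses: map[string]versionedStatus{}, apiVersions: map[string]uint64{}}
	c.set("uid-1", "Running")
	fmt.Println(c.needsUpdate("uid-1")) // true: nothing sent yet
	c.apiVersions["uid-1"] = c.statuses["uid-1"].version
	fmt.Println(c.needsUpdate("uid-1")) // false: API server is caught up
	c.set("uid-1", "Succeeded")
	fmt.Println(c.needsUpdate("uid-1")) // true: local version moved ahead
}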
-func isPodStatusByKubeletEqual(oldStatus, status *v1.PodStatus) bool { - oldCopy := oldStatus.DeepCopy() - - newConditions := make(map[v1.PodConditionType]*v1.PodCondition, len(status.Conditions)) - oldConditions := make(map[v1.PodConditionType]*v1.PodCondition, len(oldStatus.Conditions)) - for _, c := range status.Conditions { - if kubetypes.PodConditionByKubelet(c.Type) || kubetypes.PodConditionSharedByKubelet(c.Type) { - newConditions[c.Type] = &c - } - } - for _, c := range oldStatus.Conditions { - if kubetypes.PodConditionByKubelet(c.Type) || kubetypes.PodConditionSharedByKubelet(c.Type) { - oldConditions[c.Type] = &c - } - } - - if len(newConditions) != len(oldConditions) { - return false - } - for _, newCondition := range newConditions { - oldCondition := oldConditions[newCondition.Type] - if oldCondition == nil || oldCondition.Status != newCondition.Status || oldCondition.Message != newCondition.Message || oldCondition.Reason != newCondition.Reason { - return false - } - } - - oldCopy.Conditions = status.Conditions - return apiequality.Semantic.DeepEqual(oldCopy, status) -} - -func (m *manager) Start() { - // Don't start the status manager if we don't have a client. This will happen - // on the master, where the kubelet is responsible for bootstrapping the pods - // of the master components. - if m.kubeClient == nil { - klog.InfoS("Kubernetes client is nil, not starting status manager") - return - } - - klog.InfoS("Starting to sync pod status with apiserver") - - //nolint:staticcheck // SA1015 Ticker can leak since this is only called once and doesn't handle termination. - syncTicker := time.NewTicker(syncPeriod).C - - // syncPod and syncBatch share the same go routine to avoid sync races. - go wait.Forever(func() { - for { - select { - case <-m.podStatusChannel: - klog.V(4).InfoS("Syncing updated statuses") - m.syncBatch(false) - case <-syncTicker: - klog.V(4).InfoS("Syncing all statuses") - m.syncBatch(true) - } - } - }, 0) -} - -// GetPodResizeConditions returns the last cached ResizeStatus value. -func (m *manager) GetPodResizeConditions(podUID types.UID) []*v1.PodCondition { - m.podStatusesLock.RLock() - defer m.podStatusesLock.RUnlock() - return m.podResizeConditions[podUID].List() -} - -// SetPodResizePendingCondition caches the last PodResizePending condition for the pod. -func (m *manager) SetPodResizePendingCondition(podUID types.UID, reason, message string) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - - m.podResizeConditions[podUID] = podResizeConditions{ - PodResizePending: updatedPodResizeCondition(v1.PodResizePending, m.podResizeConditions[podUID].PodResizePending, reason, message), - PodResizeInProgress: m.podResizeConditions[podUID].PodResizeInProgress, - } -} - -// SetPodResizeInProgressCondition caches the last PodResizeInProgress condition for the pod. -func (m *manager) SetPodResizeInProgressCondition(podUID types.UID, reason, message string, allowReasonToBeCleared bool) { - oldConditions := m.GetPodResizeConditions(podUID) - - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - - if !allowReasonToBeCleared && reason == "" && message == "" { - // Preserve the old reason and message if there is one. 
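// The Start() loop above coalesces bursts of updates through a buffered
// channel of size one; this standalone sketch shows the same pattern in
// isolation: many notifications collapse into at most one pending wakeup.
// The timings are arbitrary illustration values.
package main

import (
	"fmt"
	"time"
)

func main() {
	notify := make(chan struct{}, 1)

	// Producer side: a non-blocking send, as the status manager does when
	// queueing an update.
	post := func() {
		select {
		case notify <- struct{}{}:
		default: // an update is already pending; nothing to do
		}
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		for range notify {
			// One batch covers however many posts happened meanwhile.
			fmt.Println("sync batch")
			time.Sleep(10 * time.Millisecond)
		}
	}()

	for i := 0; i < 100; i++ {
		post() // 100 posts, but far fewer batches
	}
	time.Sleep(50 * time.Millisecond)
	close(notify)
	<-done
}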
- for _, c := range oldConditions { - if c.Type == v1.PodResizeInProgress { - reason = c.Reason - message = c.Message - } - } - } - - m.podResizeConditions[podUID] = podResizeConditions{ - PodResizeInProgress: updatedPodResizeCondition(v1.PodResizeInProgress, m.podResizeConditions[podUID].PodResizeInProgress, reason, message), - PodResizePending: m.podResizeConditions[podUID].PodResizePending, - } -} - -// ClearPodResizePendingCondition clears the PodResizePending condition for the pod from the cache. -func (m *manager) ClearPodResizePendingCondition(podUID types.UID) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - m.podResizeConditions[podUID] = podResizeConditions{ - PodResizePending: nil, - PodResizeInProgress: m.podResizeConditions[podUID].PodResizeInProgress, - } -} - -// ClearPodResizeInProgressCondition clears the PodResizeInProgress condition for the pod from the cache. -func (m *manager) ClearPodResizeInProgressCondition(podUID types.UID) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - m.podResizeConditions[podUID] = podResizeConditions{ - PodResizePending: m.podResizeConditions[podUID].PodResizePending, - PodResizeInProgress: nil, - } -} - -func (m *manager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) { - m.podStatusesLock.RLock() - defer m.podStatusesLock.RUnlock() - status, ok := m.podStatuses[types.UID(m.podManager.TranslatePodUID(uid))] - return status.status, ok -} - -func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - - // Make sure we're caching a deep copy. - status = *status.DeepCopy() - - // Set the observedGeneration for this pod status. - status.ObservedGeneration = podutil.GetPodObservedGenerationIfEnabled(pod) - - // Force a status update if deletion timestamp is set. This is necessary - // because if the pod is in the non-running state, the pod worker still - // needs to be able to trigger an update and/or deletion. - m.updateStatusInternal(pod, status, pod.DeletionTimestamp != nil, false) -} - -func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - - pod, ok := m.podManager.GetPodByUID(podUID) - if !ok { - klog.V(4).InfoS("Pod has been deleted, no need to update readiness", "podUID", string(podUID)) - return - } - - oldStatus, found := m.podStatuses[pod.UID] - if !found { - klog.InfoS("Container readiness changed before pod has synced", - "pod", klog.KObj(pod), - "containerID", containerID.String()) - return - } - - // Find the container to update. - containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String()) - if !ok { - klog.InfoS("Container readiness changed for unknown container", - "pod", klog.KObj(pod), - "containerID", containerID.String()) - return - } - - if containerStatus.Ready == ready { - klog.V(4).InfoS("Container readiness unchanged", - "ready", ready, - "pod", klog.KObj(pod), - "containerID", containerID.String()) - return - } - - // Make sure we're not updating the cached version. 
- status := *oldStatus.status.DeepCopy() - containerStatus, _, _ = findContainerStatus(&status, containerID.String()) - containerStatus.Ready = ready - - // updateConditionFunc updates the corresponding type of condition - updateConditionFunc := func(conditionType v1.PodConditionType, condition v1.PodCondition) { - conditionIndex := -1 - for i, condition := range status.Conditions { - if condition.Type == conditionType { - conditionIndex = i - break - } - } - if conditionIndex != -1 { - status.Conditions[conditionIndex] = condition - } else { - klog.InfoS("PodStatus missing condition type", "conditionType", conditionType, "status", status) - status.Conditions = append(status.Conditions, condition) - } - } - - allContainerStatuses := append(status.InitContainerStatuses, status.ContainerStatuses...) - updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(pod, &oldStatus.status, status.Conditions, allContainerStatuses, status.Phase)) - updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(pod, &oldStatus.status, allContainerStatuses, status.Phase)) - m.updateStatusInternal(pod, status, false, false) -} - -func (m *manager) SetContainerStartup(podUID types.UID, containerID kubecontainer.ContainerID, started bool) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - - pod, ok := m.podManager.GetPodByUID(podUID) - if !ok { - klog.V(4).InfoS("Pod has been deleted, no need to update startup", "podUID", string(podUID)) - return - } - - oldStatus, found := m.podStatuses[pod.UID] - if !found { - klog.InfoS("Container startup changed before pod has synced", - "pod", klog.KObj(pod), - "containerID", containerID.String()) - return - } - - // Find the container to update. - containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String()) - if !ok { - klog.InfoS("Container startup changed for unknown container", - "pod", klog.KObj(pod), - "containerID", containerID.String()) - return - } - - if containerStatus.Started != nil && *containerStatus.Started == started { - klog.V(4).InfoS("Container startup unchanged", - "pod", klog.KObj(pod), - "containerID", containerID.String()) - return - } - - // Make sure we're not updating the cached version. - status := *oldStatus.status.DeepCopy() - containerStatus, _, _ = findContainerStatus(&status, containerID.String()) - containerStatus.Started = &started - - m.updateStatusInternal(pod, status, false, false) -} - -func findContainerStatus(status *v1.PodStatus, containerID string) (containerStatus *v1.ContainerStatus, init bool, ok bool) { - // Find the container to update. - for i, c := range status.ContainerStatuses { - if c.ContainerID == containerID { - return &status.ContainerStatuses[i], false, true - } - } - - for i, c := range status.InitContainerStatuses { - if c.ContainerID == containerID { - return &status.InitContainerStatuses[i], true, true - } - } - - return nil, false, false - -} - -// TerminatePod ensures that the status of containers is properly defaulted at the end of the pod -// lifecycle. As the Kubelet must reconcile with the container runtime to observe container status -// there is always the possibility we are unable to retrieve one or more container statuses due to -// garbage collection, admin action, or loss of temporary data on a restart. This method ensures -// that any absent container status is treated as a failure so that we do not incorrectly describe -// the pod as successful. 
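// SetContainerReadiness and SetContainerStartup above share a pattern worth
// isolating: never mutate the cached status in place; deep-copy it, re-find
// the target inside the copy, and update that. A simplified sketch with
// stand-in types:
package main

import "fmt"

type containerStatus struct {
	ID    string
	Ready bool
}

type podStatus struct {
	Containers []containerStatus
}

func (s *podStatus) deepCopy() *podStatus {
	return &podStatus{Containers: append([]containerStatus(nil), s.Containers...)}
}

func find(s *podStatus, id string) *containerStatus {
	for i := range s.Containers {
		if s.Containers[i].ID == id {
			return &s.Containers[i]
		}
	}
	return nil
}

func main() {
	cached := &podStatus{Containers: []containerStatus{{ID: "c1"}}}

	// Mutate a copy, not the cache: pointers from find() must be re-resolved
	// against the copy, just as the deleted code re-calls findContainerStatus
	// after DeepCopy.
	next := cached.deepCopy()
	if cs := find(next, "c1"); cs != nil {
		cs.Ready = true
	}
	fmt.Println(cached.Containers[0].Ready, next.Containers[0].Ready) // false true
}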
If we have not yet initialized the pod in the presence of init containers, -// the init container failure status is sufficient to describe the pod as failing, and we do not need -// to override waiting containers (unless there is evidence the pod previously started those containers). -// It also makes sure that pods are transitioned to a terminal phase (Failed or Succeeded) before -// their deletion. -func (m *manager) TerminatePod(pod *v1.Pod) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - - // ensure that all containers have a terminated state - because we do not know whether the container - // was successful, always report an error - oldStatus := &pod.Status - cachedStatus, isCached := m.podStatuses[pod.UID] - if isCached { - oldStatus = &cachedStatus.status - } - status := *oldStatus.DeepCopy() - - // once a pod has initialized, any missing status is treated as a failure - if hasPodInitialized(pod) { - for i := range status.ContainerStatuses { - if status.ContainerStatuses[i].State.Terminated != nil { - continue - } - status.ContainerStatuses[i].State = v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - Reason: kubecontainer.ContainerReasonStatusUnknown, - Message: "The container could not be located when the pod was terminated", - ExitCode: 137, - }, - } - } - } - - // all but the final suffix of init containers which have no evidence of a container start are - // marked as failed containers - for i := range initializedContainers(status.InitContainerStatuses) { - if status.InitContainerStatuses[i].State.Terminated != nil { - continue - } - status.InitContainerStatuses[i].State = v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - Reason: kubecontainer.ContainerReasonStatusUnknown, - Message: "The container could not be located when the pod was terminated", - ExitCode: 137, - }, - } - } - - // Make sure all pods are transitioned to a terminal phase. - // TODO(#116484): Also assign terminal phase to static an pods. - if !kubetypes.IsStaticPod(pod) { - switch status.Phase { - case v1.PodSucceeded, v1.PodFailed: - // do nothing, already terminal - case v1.PodPending, v1.PodRunning: - if status.Phase == v1.PodRunning && isCached { - klog.InfoS("Terminal running pod should have already been marked as failed, programmer error", "pod", klog.KObj(pod), "podUID", pod.UID) - } - klog.V(3).InfoS("Marking terminal pod as failed", "oldPhase", status.Phase, "pod", klog.KObj(pod), "podUID", pod.UID) - status.Phase = v1.PodFailed - default: - klog.ErrorS(fmt.Errorf("unknown phase: %v", status.Phase), "Unknown phase, programmer error", "pod", klog.KObj(pod), "podUID", pod.UID) - status.Phase = v1.PodFailed - } - } - - klog.V(5).InfoS("TerminatePod calling updateStatusInternal", "pod", klog.KObj(pod), "podUID", pod.UID) - m.updateStatusInternal(pod, status, true, true) -} - -// hasPodInitialized returns true if the pod has no evidence of ever starting a regular container, which -// implies those containers should not be transitioned to terminated status. 
-func hasPodInitialized(pod *v1.Pod) bool { - // a pod without init containers is always initialized - if len(pod.Spec.InitContainers) == 0 { - return true - } - // if any container has ever moved out of waiting state, the pod has initialized - for _, status := range pod.Status.ContainerStatuses { - if status.LastTerminationState.Terminated != nil || status.State.Waiting == nil { - return true - } - } - // if the last init container has ever completed with a zero exit code, the pod is initialized - if l := len(pod.Status.InitContainerStatuses); l > 0 { - container, ok := kubeutil.GetContainerByIndex(pod.Spec.InitContainers, pod.Status.InitContainerStatuses, l-1) - if !ok { - klog.V(4).InfoS("Mismatch between pod spec and status, likely programmer error", "pod", klog.KObj(pod), "containerName", container.Name) - return false - } - - containerStatus := pod.Status.InitContainerStatuses[l-1] - if podutil.IsRestartableInitContainer(&container) { - if containerStatus.State.Running != nil && - containerStatus.Started != nil && *containerStatus.Started { - return true - } - } else { // regular init container - if state := containerStatus.LastTerminationState; state.Terminated != nil && state.Terminated.ExitCode == 0 { - return true - } - if state := containerStatus.State; state.Terminated != nil && state.Terminated.ExitCode == 0 { - return true - } - } - } - // otherwise the pod has no record of being initialized - return false -} - -// initializedContainers returns all status except for suffix of containers that are in Waiting -// state, which is the set of containers that have attempted to start at least once. If all containers -// are Waiting, the first container is always returned. -func initializedContainers(containers []v1.ContainerStatus) []v1.ContainerStatus { - for i := len(containers) - 1; i >= 0; i-- { - if containers[i].State.Waiting == nil || containers[i].LastTerminationState.Terminated != nil { - return containers[0 : i+1] - } - } - // always return at least one container - if len(containers) > 0 { - return containers[0:1] - } - return nil -} - -// checkContainerStateTransition ensures that no container is trying to transition -// from a terminated to non-terminated state, which is illegal and indicates a -// logical error in the kubelet. 
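// A small worked example of the initializedContainers rule above: walk the
// init-container statuses from the end and drop the trailing run that is
// still Waiting with no prior termination, keeping at least one entry. The
// container names and boolean fields are simplified stand-ins.
package main

import "fmt"

type status struct {
	Name    string
	Waiting bool // stands in for State.Waiting != nil
	EverRan bool // stands in for LastTerminationState.Terminated != nil
}

func initialized(containers []status) []status {
	for i := len(containers) - 1; i >= 0; i-- {
		if !containers[i].Waiting || containers[i].EverRan {
			return containers[:i+1]
		}
	}
	// Always keep at least one container, as the deleted code does.
	if len(containers) > 0 {
		return containers[:1]
	}
	return nil
}

func main() {
	cs := []status{
		{Name: "init-a", Waiting: false},               // attempted to start
		{Name: "init-b", Waiting: true, EverRan: true}, // restarting
		{Name: "init-c", Waiting: true},                // never attempted
	}
	for _, c := range initialized(cs) {
		fmt.Println(c.Name) // init-a, init-b; init-c is excluded
	}
}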
-func checkContainerStateTransition(oldStatuses, newStatuses *v1.PodStatus, podSpec *v1.PodSpec) error { - // If we should always restart, containers are allowed to leave the terminated state - if podSpec.RestartPolicy == v1.RestartPolicyAlways { - return nil - } - for _, oldStatus := range oldStatuses.ContainerStatuses { - // Skip any container that wasn't terminated - if oldStatus.State.Terminated == nil { - continue - } - // Skip any container that failed but is allowed to restart - if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == v1.RestartPolicyOnFailure { - continue - } - for _, newStatus := range newStatuses.ContainerStatuses { - if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { - return fmt.Errorf("terminated container %v attempted illegal transition to non-terminated state", newStatus.Name) - } - } - } - - for i, oldStatus := range oldStatuses.InitContainerStatuses { - initContainer, ok := kubeutil.GetContainerByIndex(podSpec.InitContainers, oldStatuses.InitContainerStatuses, i) - if !ok { - return fmt.Errorf("found mismatch between pod spec and status, container: %v", oldStatus.Name) - } - // Skip any restartable init container as it always is allowed to restart - if podutil.IsRestartableInitContainer(&initContainer) { - continue - } - // Skip any container that wasn't terminated - if oldStatus.State.Terminated == nil { - continue - } - // Skip any container that failed but is allowed to restart - if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == v1.RestartPolicyOnFailure { - continue - } - for _, newStatus := range newStatuses.InitContainerStatuses { - if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { - return fmt.Errorf("terminated init container %v attempted illegal transition to non-terminated state", newStatus.Name) - } - } - } - return nil -} - -// updateStatusInternal updates the internal status cache, and queues an update to the api server if -// necessary. -// This method IS NOT THREAD SAFE and must be called from a locked function. -func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUpdate, podIsFinished bool) { - var oldStatus v1.PodStatus - cachedStatus, isCached := m.podStatuses[pod.UID] - if isCached { - oldStatus = cachedStatus.status - // TODO(#116484): Also assign terminal phase to static pods. - if !kubetypes.IsStaticPod(pod) { - if cachedStatus.podIsFinished && !podIsFinished { - klog.InfoS("Got unexpected podIsFinished=false, while podIsFinished=true in status cache, programmer error.", "pod", klog.KObj(pod)) - podIsFinished = true - } - } - } else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok { - oldStatus = mirrorPod.Status - } else { - oldStatus = pod.Status - } - - // Check for illegal state transition in containers - if err := checkContainerStateTransition(&oldStatus, &status, &pod.Spec); err != nil { - klog.ErrorS(err, "Status update on pod aborted", "pod", klog.KObj(pod)) - return - } - - // Set ContainersReadyCondition.LastTransitionTime. - updateLastTransitionTime(&status, &oldStatus, v1.ContainersReady) - - // Set ReadyCondition.LastTransitionTime. - updateLastTransitionTime(&status, &oldStatus, v1.PodReady) - - // Set InitializedCondition.LastTransitionTime. - updateLastTransitionTime(&status, &oldStatus, v1.PodInitialized) - - // Set PodReadyToStartContainersCondition.LastTransitionTime. 
- updateLastTransitionTime(&status, &oldStatus, v1.PodReadyToStartContainers) - - // Set PodScheduledCondition.LastTransitionTime. - updateLastTransitionTime(&status, &oldStatus, v1.PodScheduled) - - // Set DisruptionTarget.LastTransitionTime. - updateLastTransitionTime(&status, &oldStatus, v1.DisruptionTarget) - - // ensure that the start time does not change across updates. - if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() { - status.StartTime = oldStatus.StartTime - } else if status.StartTime.IsZero() { - // if the status has no start time, we need to set an initial time - now := metav1.Now() - status.StartTime = &now - } - - // prevent sending unnecessary patches - if oldStatus.ObservedGeneration > status.ObservedGeneration { - status.ObservedGeneration = oldStatus.ObservedGeneration - } - - normalizeStatus(pod, &status) - - // Perform some more extensive logging of container termination state to assist in - // debugging production races (generally not needed). - if klogV := klog.V(5); klogV.Enabled() { - var containers []string - for _, s := range append(append([]v1.ContainerStatus(nil), status.InitContainerStatuses...), status.ContainerStatuses...) { - var current, previous string - switch { - case s.State.Running != nil: - current = "running" - case s.State.Waiting != nil: - current = "waiting" - case s.State.Terminated != nil: - current = fmt.Sprintf("terminated=%d", s.State.Terminated.ExitCode) - default: - current = "unknown" - } - switch { - case s.LastTerminationState.Running != nil: - previous = "running" - case s.LastTerminationState.Waiting != nil: - previous = "waiting" - case s.LastTerminationState.Terminated != nil: - previous = fmt.Sprintf("terminated=%d", s.LastTerminationState.Terminated.ExitCode) - default: - previous = "" - } - containers = append(containers, fmt.Sprintf("(%s state=%s previous=%s)", s.Name, current, previous)) - } - sort.Strings(containers) - klogV.InfoS("updateStatusInternal", "version", cachedStatus.version+1, "podIsFinished", podIsFinished, "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " ")) - } - - // The intent here is to prevent concurrent updates to a pod's status from - // clobbering each other so the phase of a pod progresses monotonically. - if isCached && isPodStatusByKubeletEqual(&cachedStatus.status, &status) && !forceUpdate { - klog.V(3).InfoS("Ignoring same status for pod", "pod", klog.KObj(pod), "status", status) - return - } - - newStatus := versionedPodStatus{ - status: status, - version: cachedStatus.version + 1, - podName: pod.Name, - podNamespace: pod.Namespace, - podIsFinished: podIsFinished, - } - - // Multiple status updates can be generated before we update the API server, - // so we track the time from the first status update until we retire it to - // the API. - if cachedStatus.at.IsZero() { - newStatus.at = time.Now() - } else { - newStatus.at = cachedStatus.at - } - - m.podStatuses[pod.UID] = newStatus - - select { - case m.podStatusChannel <- struct{}{}: - default: - // there's already a status update pending - } -} - -// updateLastTransitionTime updates the LastTransitionTime of a pod condition. -func updateLastTransitionTime(status, oldStatus *v1.PodStatus, conditionType v1.PodConditionType) { - _, condition := podutil.GetPodCondition(status, conditionType) - if condition == nil { - return - } - // Need to set LastTransitionTime. 
- lastTransitionTime := metav1.Now() - _, oldCondition := podutil.GetPodCondition(oldStatus, conditionType) - if oldCondition != nil && condition.Status == oldCondition.Status { - lastTransitionTime = oldCondition.LastTransitionTime - } - condition.LastTransitionTime = lastTransitionTime -} - -// deletePodStatus simply removes the given pod from the status cache. -func (m *manager) deletePodStatus(uid types.UID) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - delete(m.podStatuses, uid) - m.podStartupLatencyHelper.DeletePodStartupState(uid) - if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - delete(m.podResizeConditions, uid) - } -} - -// TODO(filipg): It'd be cleaner if we can do this without signal from user. -func (m *manager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) { - m.podStatusesLock.Lock() - defer m.podStatusesLock.Unlock() - for key := range m.podStatuses { - if _, ok := podUIDs[key]; !ok { - klog.V(5).InfoS("Removing pod from status map.", "podUID", key) - delete(m.podStatuses, key) - if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - delete(m.podResizeConditions, key) - } - } - } -} - -// syncBatch syncs pods statuses with the apiserver. Returns the number of syncs -// attempted for testing. -func (m *manager) syncBatch(all bool) int { - type podSync struct { - podUID types.UID - statusUID kubetypes.MirrorPodUID - status versionedPodStatus - } - - var updatedStatuses []podSync - podToMirror, mirrorToPod := m.podManager.GetUIDTranslations() - func() { // Critical section - m.podStatusesLock.RLock() - defer m.podStatusesLock.RUnlock() - - // Clean up orphaned versions. - if all { - for uid := range m.apiStatusVersions { - _, hasPod := m.podStatuses[types.UID(uid)] - _, hasMirror := mirrorToPod[uid] - if !hasPod && !hasMirror { - delete(m.apiStatusVersions, uid) - } - } - } - - // Decide which pods need status updates. - for uid, status := range m.podStatuses { - // translate the pod UID (source) to the status UID (API pod) - - // static pods are identified in source by pod UID but tracked in the - // API via the uid of the mirror pod - uidOfStatus := kubetypes.MirrorPodUID(uid) - if mirrorUID, ok := podToMirror[kubetypes.ResolvedPodUID(uid)]; ok { - if mirrorUID == "" { - klog.V(5).InfoS("Static pod does not have a corresponding mirror pod; skipping", - "podUID", uid, - "pod", klog.KRef(status.podNamespace, status.podName)) - continue - } - uidOfStatus = mirrorUID - } - - // if a new status update has been delivered, trigger an update, otherwise the - // pod can wait for the next bulk check (which performs reconciliation as well) - if !all { - if m.apiStatusVersions[uidOfStatus] >= status.version { - continue - } - updatedStatuses = append(updatedStatuses, podSync{uid, uidOfStatus, status}) - continue - } - - // Ensure that any new status, or mismatched status, or pod that is ready for - // deletion gets updated. If a status update fails we retry the next time any - // other pod is updated. - if m.needsUpdate(types.UID(uidOfStatus), status) { - updatedStatuses = append(updatedStatuses, podSync{uid, uidOfStatus, status}) - } else if m.needsReconcile(uid, status.status) { - // Delete the apiStatusVersions here to force an update on the pod status - // In most cases the deleted apiStatusVersions here should be filled - // soon after the following syncPod() [If the syncPod() sync an update - // successfully]. 
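// The LastTransitionTime rule from updateLastTransitionTime above, in
// isolation: the timestamp only moves when the condition's status actually
// flips; an unchanged status keeps the old stamp. Simplified types again;
// the dates are arbitrary examples.
package main

import (
	"fmt"
	"time"
)

type cond struct {
	Status             string
	LastTransitionTime time.Time
}

func withTransitionTime(newC cond, oldC *cond, now time.Time) cond {
	if oldC != nil && oldC.Status == newC.Status {
		newC.LastTransitionTime = oldC.LastTransitionTime // no flip: keep old stamp
	} else {
		newC.LastTransitionTime = now
	}
	return newC
}

func main() {
	t0 := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
	old := cond{Status: "True", LastTransitionTime: t0}

	same := withTransitionTime(cond{Status: "True"}, &old, time.Now())
	flipped := withTransitionTime(cond{Status: "False"}, &old, time.Now())
	fmt.Println(same.LastTransitionTime.Equal(t0), flipped.LastTransitionTime.Equal(t0)) // true false
}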
- delete(m.apiStatusVersions, uidOfStatus) - updatedStatuses = append(updatedStatuses, podSync{uid, uidOfStatus, status}) - } - } - }() - - for _, update := range updatedStatuses { - klog.V(5).InfoS("Sync pod status", "podUID", update.podUID, "statusUID", update.statusUID, "version", update.status.version) - m.syncPod(update.podUID, update.status) - } - - return len(updatedStatuses) -} - -// syncPod syncs the given status with the API server. The caller must not hold the status lock. -func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { - // TODO: make me easier to express from client code - pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(context.TODO(), status.podName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - klog.V(3).InfoS("Pod does not exist on the server", - "podUID", uid, - "pod", klog.KRef(status.podNamespace, status.podName)) - // If the Pod is deleted the status will be cleared in - // RemoveOrphanedStatuses, so we just ignore the update here. - return - } - if err != nil { - klog.InfoS("Failed to get status for pod", - "podUID", uid, - "pod", klog.KRef(status.podNamespace, status.podName), - "err", err) - return - } - - translatedUID := m.podManager.TranslatePodUID(pod.UID) - // Type convert original uid just for the purpose of comparison. - if len(translatedUID) > 0 && translatedUID != kubetypes.ResolvedPodUID(uid) { - klog.V(2).InfoS("Pod was deleted and then recreated, skipping status update", - "pod", klog.KObj(pod), - "oldPodUID", uid, - "podUID", translatedUID) - m.deletePodStatus(uid) - return - } - - mergedStatus := mergePodStatus(pod, pod.Status, status.status, m.podDeletionSafety.PodCouldHaveRunningContainers(pod)) - - newPod, patchBytes, unchanged, err := statusutil.PatchPodStatus(context.TODO(), m.kubeClient, pod.Namespace, pod.Name, pod.UID, pod.Status, mergedStatus) - klog.V(3).InfoS("Patch status for pod", "pod", klog.KObj(pod), "podUID", uid, "patch", string(patchBytes)) - - if err != nil { - klog.InfoS("Failed to update status for pod", "pod", klog.KObj(pod), "err", err) - return - } - if unchanged { - klog.V(3).InfoS("Status for pod is up-to-date", "pod", klog.KObj(pod), "statusVersion", status.version) - } else { - klog.V(3).InfoS("Status for pod updated successfully", "pod", klog.KObj(pod), "statusVersion", status.version, "status", mergedStatus) - pod = newPod - // We pass a new object (result of API call which contains updated ResourceVersion) - m.podStartupLatencyHelper.RecordStatusUpdated(pod) - } - - // measure how long the status update took to propagate from generation to update on the server - if status.at.IsZero() { - klog.V(3).InfoS("Pod had no status time set", "pod", klog.KObj(pod), "podUID", uid, "version", status.version) - } else { - duration := time.Since(status.at).Truncate(time.Millisecond) - metrics.PodStatusSyncDuration.Observe(duration.Seconds()) - } - - m.apiStatusVersions[kubetypes.MirrorPodUID(pod.UID)] = status.version - - // We don't handle graceful deletion of mirror pods. - if m.canBeDeleted(pod, status.status, status.podIsFinished) { - deleteOptions := metav1.DeleteOptions{ - GracePeriodSeconds: new(int64), - // Use the pod UID as the precondition for deletion to prevent deleting a - // newly created pod with the same name and namespace. 
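// Earlier in syncPod the GET deliberately tolerates a deletion race: a
// NotFound result is a no-op and any other error waits for the next sync.
// A sketch of that shape with client-go (assumed imports: context,
// apierrors "k8s.io/apimachinery/pkg/api/errors", metav1, and a
// kubernetes.Interface client; not part of this patch):

func getIfPresent(ctx context.Context, c kubernetes.Interface, ns, name string) (*v1.Pod, bool, error) {
	pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return nil, false, nil // deleted elsewhere; nothing left to sync
	}
	if err != nil {
		return nil, false, err // transient failure; retry on a later sync
	}
	return pod, true, nil
}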
- Preconditions: metav1.NewUIDPreconditions(string(pod.UID)), - } - err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions) - if err != nil { - klog.InfoS("Failed to delete status for pod", "pod", klog.KObj(pod), "err", err) - return - } - klog.V(3).InfoS("Pod fully terminated and removed from etcd", "pod", klog.KObj(pod)) - m.deletePodStatus(uid) - } -} - -// needsUpdate returns whether the status is stale for the given pod UID. -// This method is not thread safe, and must only be accessed by the sync thread. -func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool { - latest, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(uid)] - if !ok || latest < status.version { - return true - } - pod, ok := m.podManager.GetPodByUID(uid) - if !ok { - return false - } - return m.canBeDeleted(pod, status.status, status.podIsFinished) -} - -func (m *manager) canBeDeleted(pod *v1.Pod, status v1.PodStatus, podIsFinished bool) bool { - if pod.DeletionTimestamp == nil || kubetypes.IsMirrorPod(pod) { - return false - } - // Delay deletion of pods until the phase is terminal, based on pod.Status - // which comes from pod manager. - if !podutil.IsPodPhaseTerminal(pod.Status.Phase) { - // For debugging purposes we also log the kubelet's local phase, when the deletion is delayed. - klog.V(3).InfoS("Delaying pod deletion as the phase is non-terminal", "phase", pod.Status.Phase, "localPhase", status.Phase, "pod", klog.KObj(pod), "podUID", pod.UID) - return false - } - // If this is an update completing pod termination then we know the pod termination is finished. - if podIsFinished { - klog.V(3).InfoS("The pod termination is finished as SyncTerminatedPod completes its execution", "phase", pod.Status.Phase, "localPhase", status.Phase, "pod", klog.KObj(pod), "podUID", pod.UID) - return true - } - return false -} - -// needsReconcile compares the given status with the status in the pod manager (which -// in fact comes from apiserver), returns whether the status needs to be reconciled with -// the apiserver. Now when pod status is inconsistent between apiserver and kubelet, -// kubelet should forcibly send an update to reconcile the inconsistence, because kubelet -// should be the source of truth of pod status. -// NOTE(random-liu): It's simpler to pass in mirror pod uid and get mirror pod by uid, but -// now the pod manager only supports getting mirror pod by static pod, so we have to pass -// static pod uid here. -// TODO(random-liu): Simplify the logic when mirror pod manager is added. -func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { - // The pod could be a static pod, so we should translate first. - pod, ok := m.podManager.GetPodByUID(uid) - if !ok { - klog.V(4).InfoS("Pod has been deleted, no need to reconcile", "podUID", string(uid)) - return false - } - // If the pod is a static pod, we should check its mirror pod, because only status in mirror pod is meaningful to us. - if kubetypes.IsStaticPod(pod) { - mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod) - if !ok { - klog.V(4).InfoS("Static pod has no corresponding mirror pod, no need to reconcile", "pod", klog.KObj(pod)) - return false - } - pod = mirrorPod - } - - podStatus := pod.Status.DeepCopy() - normalizeStatus(pod, podStatus) - - if isPodStatusByKubeletEqual(podStatus, &status) { - // If the status from the source is the same with the cached status, - // reconcile is not needed. Just return. 
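// The reconcile decision below reduces to "normalize both copies, then
// compare". Stripped of the pod-specific types (reflect.DeepEqual stands in
// for the semantic comparison used here; hypothetical helper, not part of
// this patch):

func driftAfterNormalize(apiCopy, local any, normalize func(any)) bool {
	normalize(apiCopy)
	normalize(local)
	return !reflect.DeepEqual(apiCopy, local) // true => force a status write
}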
- return false - } - klog.V(3).InfoS("Pod status is inconsistent with cached status for pod, a reconciliation should be triggered", - "pod", klog.KObj(pod), - "statusDiff", cmp.Diff(podStatus, &status)) - - return true -} - -// normalizeStatus normalizes nanosecond precision timestamps in podStatus -// down to second precision (*RFC339NANO* -> *RFC3339*). This must be done -// before comparing podStatus to the status returned by apiserver because -// apiserver does not support RFC339NANO. -// Related issue #15262/PR #15263 to move apiserver to RFC339NANO is closed. -func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus { - bytesPerStatus := kubecontainer.MaxPodTerminationMessageLogLength - if containers := len(pod.Spec.Containers) + len(pod.Spec.InitContainers) + len(pod.Spec.EphemeralContainers); containers > 0 { - bytesPerStatus = bytesPerStatus / containers - } - normalizeTimeStamp := func(t *metav1.Time) { - *t = t.Rfc3339Copy() - } - normalizeContainerState := func(c *v1.ContainerState) { - if c.Running != nil { - normalizeTimeStamp(&c.Running.StartedAt) - } - if c.Terminated != nil { - normalizeTimeStamp(&c.Terminated.StartedAt) - normalizeTimeStamp(&c.Terminated.FinishedAt) - if len(c.Terminated.Message) > bytesPerStatus { - c.Terminated.Message = c.Terminated.Message[:bytesPerStatus] - } - } - } - - if status.StartTime != nil { - normalizeTimeStamp(status.StartTime) - } - for i := range status.Conditions { - condition := &status.Conditions[i] - normalizeTimeStamp(&condition.LastProbeTime) - normalizeTimeStamp(&condition.LastTransitionTime) - } - - normalizeContainerStatuses := func(containerStatuses []v1.ContainerStatus) { - for i := range containerStatuses { - cstatus := &containerStatuses[i] - normalizeContainerState(&cstatus.State) - normalizeContainerState(&cstatus.LastTerminationState) - } - } - - normalizeContainerStatuses(status.ContainerStatuses) - sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses)) - - normalizeContainerStatuses(status.InitContainerStatuses) - kubetypes.SortInitContainerStatuses(pod, status.InitContainerStatuses) - - normalizeContainerStatuses(status.EphemeralContainerStatuses) - sort.Sort(kubetypes.SortedContainerStatuses(status.EphemeralContainerStatuses)) - - return status -} - -// mergePodStatus merges oldPodStatus and newPodStatus to preserve where pod conditions -// not owned by kubelet and to ensure terminal phase transition only happens after all -// running containers have terminated. This method does not modify the old status. -func mergePodStatus(pod *v1.Pod, oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningContainers bool) v1.PodStatus { - podConditions := make([]v1.PodCondition, 0, len(oldPodStatus.Conditions)+len(newPodStatus.Conditions)) - - for _, c := range oldPodStatus.Conditions { - if !kubetypes.PodConditionByKubelet(c.Type) { - podConditions = append(podConditions, c) - } - } - - transitioningToTerminalPhase := !podutil.IsPodPhaseTerminal(oldPodStatus.Phase) && podutil.IsPodPhaseTerminal(newPodStatus.Phase) - - for _, c := range newPodStatus.Conditions { - if kubetypes.PodConditionByKubelet(c.Type) { - podConditions = append(podConditions, c) - } else if kubetypes.PodConditionSharedByKubelet(c.Type) { - // we replace or append all the "shared by kubelet" conditions - if c.Type == v1.DisruptionTarget { - // guard the update of the DisruptionTarget condition with a check to ensure - // it will only be sent once all containers have terminated and the phase - // is terminal. 
This avoids sending an unnecessary patch request to add - // the condition if the actual status phase transition is delayed. - if transitioningToTerminalPhase && !couldHaveRunningContainers { - // update the LastTransitionTime again here because the older transition - // time set in updateStatusInternal is likely stale as sending of - // the condition was delayed until all pod's containers have terminated. - updateLastTransitionTime(&newPodStatus, &oldPodStatus, c.Type) - if _, c := podutil.GetPodConditionFromList(newPodStatus.Conditions, c.Type); c != nil { - // for shared conditions we update or append in podConditions - podConditions = statusutil.ReplaceOrAppendPodCondition(podConditions, c) - } - } - } - } - } - newPodStatus.Conditions = podConditions - - // ResourceClaimStatuses is not owned and not modified by kubelet. - newPodStatus.ResourceClaimStatuses = oldPodStatus.ResourceClaimStatuses - - // Delay transitioning a pod to a terminal status unless the pod is actually terminal. - // The Kubelet should never transition a pod to terminal status that could have running - // containers and thus actively be leveraging exclusive resources. Note that resources - // like volumes are reconciled by a subsystem in the Kubelet and will converge if a new - // pod reuses an exclusive resource (unmount -> free -> mount), which means we do not - // need wait for those resources to be detached by the Kubelet. In general, resources - // the Kubelet exclusively owns must be released prior to a pod being reported terminal, - // while resources that have participanting components above the API use the pod's - // transition to a terminal phase (or full deletion) to release those resources. - if transitioningToTerminalPhase { - if couldHaveRunningContainers { - newPodStatus.Phase = oldPodStatus.Phase - newPodStatus.Reason = oldPodStatus.Reason - newPodStatus.Message = oldPodStatus.Message - } - } - - // If the new phase is terminal, explicitly set the ready condition to false for v1.PodReady and v1.ContainersReady. - // It may take some time for kubelet to reconcile the ready condition, so explicitly set ready conditions to false if the phase is terminal. - // This is done to ensure kubelet does not report a status update with terminal pod phase and ready=true. - // See https://issues.k8s.io/108594 for more details. 
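// The phase handling in mergePodStatus can be read as a small decision table:
// a new terminal phase is only reported once no container can still be
// running; otherwise the previous phase is carried forward. As a pure
// function (hypothetical names, not part of this patch):

func phaseToReport(oldPhase, newPhase string, isTerminal func(string) bool, couldHaveRunningContainers bool) string {
	if isTerminal(newPhase) && !isTerminal(oldPhase) && couldHaveRunningContainers {
		return oldPhase // containers may still hold exclusive resources
	}
	return newPhase
}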
- if podutil.IsPodPhaseTerminal(newPodStatus.Phase) { - if podutil.IsPodReadyConditionTrue(newPodStatus) || podutil.IsContainersReadyConditionTrue(newPodStatus) { - containersReadyCondition := generateContainersReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase) - podutil.UpdatePodCondition(&newPodStatus, &containersReadyCondition) - - podReadyCondition := generatePodReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase) - podutil.UpdatePodCondition(&newPodStatus, &podReadyCondition) - } - } - - return newPodStatus -} - -// NeedToReconcilePodReadiness returns if the pod "Ready" condition need to be reconcile -func NeedToReconcilePodReadiness(pod *v1.Pod) bool { - if len(pod.Spec.ReadinessGates) == 0 { - return false - } - podReadyCondition := GeneratePodReadyCondition(pod, &pod.Status, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase) - i, curCondition := podutil.GetPodConditionFromList(pod.Status.Conditions, v1.PodReady) - // Only reconcile if "Ready" condition is present and Status or Message is not expected - if i >= 0 && (curCondition.Status != podReadyCondition.Status || curCondition.Message != podReadyCondition.Message) { - return true - } - return false -} - -func updatedPodResizeCondition(conditionType v1.PodConditionType, oldCondition *v1.PodCondition, reason, message string) *v1.PodCondition { - now := metav1.NewTime(time.Now()) - var lastTransitionTime metav1.Time - if oldCondition == nil || oldCondition.Reason != reason { - lastTransitionTime = now - } else { - lastTransitionTime = oldCondition.LastTransitionTime - } - - return &v1.PodCondition{ - Type: conditionType, - Status: v1.ConditionTrue, - LastProbeTime: now, - LastTransitionTime: lastTransitionTime, - Reason: reason, - Message: message, - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go deleted file mode 100644 index 791052dbbce..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -const ( - // ResolvConfDefault is the system default DNS resolver configuration. - ResolvConfDefault = "/etc/resolv.conf" -) - -// User visible keys for managing node allocatable enforcement on the node. 
-const ( - NodeAllocatableEnforcementKey = "pods" - SystemReservedEnforcementKey = "system-reserved" - SystemReservedCompressibleEnforcementKey = "system-reserved-compressible" - KubeReservedEnforcementKey = "kube-reserved" - KubeReservedCompressibleEnforcementKey = "kube-reserved-compressible" - NodeAllocatableNoneKey = "none" -) - -// SwapBehavior types -type SwapBehavior string - -const ( - LimitedSwap SwapBehavior = "LimitedSwap" - NoSwap SwapBehavior = "NoSwap" -) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go deleted file mode 100644 index 9227a16ef40..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package types contains common types in the Kubelet. -package types diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go deleted file mode 100644 index 24612e40df5..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package types
-
-import (
-	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/kubernetes/pkg/features"
-)
-
-// PodConditionsByKubelet is the list of pod conditions owned by kubelet
-var PodConditionsByKubelet = []v1.PodConditionType{
-	v1.PodScheduled,
-	v1.PodReady,
-	v1.PodInitialized,
-	v1.ContainersReady,
-	v1.PodResizeInProgress,
-	v1.PodResizePending,
-}
-
-// PodConditionByKubelet returns true if the pod condition type is owned by kubelet
-func PodConditionByKubelet(conditionType v1.PodConditionType) bool {
-	for _, c := range PodConditionsByKubelet {
-		if c == conditionType {
-			return true
-		}
-	}
-	if utilfeature.DefaultFeatureGate.Enabled(features.PodReadyToStartContainersCondition) {
-		if conditionType == v1.PodReadyToStartContainers {
-			return true
-		}
-	}
-	return false
-}
-
-// PodConditionSharedByKubelet returns true if the pod condition type is shared by kubelet
-func PodConditionSharedByKubelet(conditionType v1.PodConditionType) bool {
-	return conditionType == v1.DisruptionTarget
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
deleted file mode 100644
index ee4bd58c7e6..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package types
-
-import (
-	"fmt"
-
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	"k8s.io/kubernetes/pkg/apis/scheduling"
-)
-
-// Annotation keys for annotations used in this package.
-const (
-	ConfigSourceAnnotationKey    = "kubernetes.io/config.source"
-	ConfigMirrorAnnotationKey    = v1.MirrorPodAnnotationKey
-	ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen"
-	ConfigHashAnnotationKey      = "kubernetes.io/config.hash"
-)
-
-// PodOperation defines what changes will be made on a pod configuration.
-type PodOperation int
-
-// These constants identify the PodOperations that can be made on a pod configuration.
-const (
-	// SET is the current pod configuration.
-	SET PodOperation = iota
-	// ADD signifies pods that are new to this source.
-	ADD
-	// DELETE signifies pods that are gracefully deleted from this source.
-	DELETE
-	// REMOVE signifies pods that have been removed from this source.
-	REMOVE
-	// UPDATE signifies pods have been updated in this source.
-	UPDATE
-	// RECONCILE signifies pods that have unexpected status in this source,
-	// kubelet should reconcile status with this source.
-	RECONCILE
-)
-
-// These constants identify the sources of pods.
-const (
-	// FileSource identifies updates from a file.
-	FileSource = "file"
-	// HTTPSource identifies updates from querying a web page.
-	HTTPSource = "http"
-	// ApiserverSource identifies updates from Kubernetes API Server.
-	ApiserverSource = "api"
-	// AllSource identifies updates from all sources.
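// Back in pod_status.go above, PodConditionByKubelet is a linear scan plus a
// feature-gate special case. On Go 1.21+ the scan itself is just
// slices.Contains from the standard library (illustration only; the vendored
// code predates that style and this helper is not part of this patch):

func ownedByKubelet(owned []string, conditionType string) bool {
	return slices.Contains(owned, conditionType) // import "slices"
}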
- AllSource = "*" -) - -// NamespaceDefault is a string representing the default namespace. -const NamespaceDefault = metav1.NamespaceDefault - -// PodUpdate defines an operation sent on the channel. You can add or remove single services by -// sending an array of size one and Op == ADD|REMOVE (with REMOVE, only the ID is required). -// For setting the state of the system to a given state for this source configuration, set -// Pods as desired and Op to SET, which will reset the system state to that specified in this -// operation for this source channel. To remove all pods, set Pods to empty object and Op to SET. -// -// Additionally, Pods should never be nil - it should always point to an empty slice. While -// functionally similar, this helps our unit tests properly check that the correct PodUpdates -// are generated. -type PodUpdate struct { - Pods []*v1.Pod - Op PodOperation - Source string -} - -// GetValidatedSources gets all validated sources from the specified sources. -func GetValidatedSources(sources []string) ([]string, error) { - validated := make([]string, 0, len(sources)) - for _, source := range sources { - switch source { - case AllSource: - return []string{FileSource, HTTPSource, ApiserverSource}, nil - case FileSource, HTTPSource, ApiserverSource: - validated = append(validated, source) - case "": - // Skip - default: - return []string{}, fmt.Errorf("unknown pod source %q", source) - } - } - return validated, nil -} - -// GetPodSource returns the source of the pod based on the annotation. -func GetPodSource(pod *v1.Pod) (string, error) { - if pod.Annotations != nil { - if source, ok := pod.Annotations[ConfigSourceAnnotationKey]; ok { - return source, nil - } - } - return "", fmt.Errorf("cannot get source of pod %q", pod.UID) -} - -// SyncPodType classifies pod updates, eg: create, update. -type SyncPodType int - -const ( - // SyncPodSync is when the pod is synced to ensure desired state - SyncPodSync SyncPodType = iota - // SyncPodUpdate is when the pod is updated from source - SyncPodUpdate - // SyncPodCreate is when the pod is created from source - SyncPodCreate - // SyncPodKill is when the pod should have no running containers. A pod stopped in this way could be - // restarted in the future due config changes. - SyncPodKill -) - -func (sp SyncPodType) String() string { - switch sp { - case SyncPodCreate: - return "create" - case SyncPodUpdate: - return "update" - case SyncPodSync: - return "sync" - case SyncPodKill: - return "kill" - default: - return "unknown" - } -} - -// IsMirrorPod returns true if the passed Pod is a Mirror Pod. -func IsMirrorPod(pod *v1.Pod) bool { - if pod.Annotations == nil { - return false - } - _, ok := pod.Annotations[ConfigMirrorAnnotationKey] - return ok -} - -// IsStaticPod returns true if the pod is a static pod. -func IsStaticPod(pod *v1.Pod) bool { - source, err := GetPodSource(pod) - return err == nil && source != ApiserverSource -} - -// IsCriticalPod returns true if pod's priority is greater than or equal to SystemCriticalPriority. 
-func IsCriticalPod(pod *v1.Pod) bool { - if IsStaticPod(pod) { - return true - } - if IsMirrorPod(pod) { - return true - } - if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) { - return true - } - return false -} - -// Preemptable returns true if preemptor pod can preempt preemptee pod -// if preemptee is not critical or if preemptor's priority is greater than preemptee's priority -func Preemptable(preemptor, preemptee *v1.Pod) bool { - if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) { - return true - } - if (preemptor != nil && preemptor.Spec.Priority != nil) && - (preemptee != nil && preemptee.Spec.Priority != nil) { - return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority) - } - - return false -} - -// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec. -func IsCriticalPodBasedOnPriority(priority int32) bool { - return priority >= scheduling.SystemCriticalPriority -} - -// IsNodeCriticalPod checks if the given pod is a system-node-critical -func IsNodeCriticalPod(pod *v1.Pod) bool { - return IsCriticalPod(pod) && (pod.Spec.PriorityClassName == scheduling.SystemNodeCritical) -} - -// HasRestartableInitContainer returns true if the pod has any restartable init -// container -func HasRestartableInitContainer(pod *v1.Pod) bool { - for _, container := range pod.Spec.InitContainers { - if podutil.IsRestartableInitContainer(&container) { - return true - } - } - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go deleted file mode 100644 index 1cc6ceaef09..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "net/http" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/cri-client/pkg/logs" -) - -// TODO: Reconcile custom types in kubelet/types and this subpackage - -// HTTPDoer encapsulates http.Do functionality -type HTTPDoer interface { - Do(req *http.Request) (*http.Response, error) -} - -// Timestamp wraps around time.Time and offers utilities to format and parse -// the time using RFC3339Nano -type Timestamp struct { - time time.Time -} - -// NewTimestamp returns a Timestamp object using the current time. -func NewTimestamp() *Timestamp { - return &Timestamp{time.Now()} -} - -// ConvertToTimestamp takes a string, parses it using the RFC3339NanoLenient layout, -// and converts it to a Timestamp object. -func ConvertToTimestamp(timeString string) *Timestamp { - parsed, _ := time.Parse(logs.RFC3339NanoLenient, timeString) - return &Timestamp{parsed} -} - -// Get returns the time as time.Time. -func (t *Timestamp) Get() time.Time { - return t.time -} - -// GetString returns the time in the string format using the RFC3339NanoFixed -// layout. 
-func (t *Timestamp) GetString() string { - return t.time.Format(logs.RFC3339NanoFixed) -} - -// SortedContainerStatuses is a type to help sort container statuses based on container names. -type SortedContainerStatuses []v1.ContainerStatus - -func (s SortedContainerStatuses) Len() int { return len(s) } -func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s SortedContainerStatuses) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -// SortInitContainerStatuses ensures that statuses are in the order that their -// init container appears in the pod spec. The function assumes there are no -// duplicate names in the statuses. -func SortInitContainerStatuses(p *v1.Pod, statuses []v1.ContainerStatus) { - containers := p.Spec.InitContainers - current := 0 - for _, container := range containers { - for j := current; j < len(statuses); j++ { - if container.Name == statuses[j].Name { - statuses[current], statuses[j] = statuses[j], statuses[current] - current++ - break - } - } - } -} - -// SortStatusesOfInitContainers returns the statuses of InitContainers of pod p, -// in the order that they appear in its spec. -func SortStatusesOfInitContainers(p *v1.Pod, statusMap map[string]*v1.ContainerStatus) []v1.ContainerStatus { - containers := p.Spec.InitContainers - statuses := []v1.ContainerStatus{} - for _, container := range containers { - if status, found := statusMap[container.Name]; found { - statuses = append(statuses, *status) - } - } - return statuses -} - -// Reservation represents reserved resources for non-pod components. -type Reservation struct { - // System represents resources reserved for non-kubernetes components. - System v1.ResourceList - // Kubernetes represents resources reserved for kubernetes system components. - Kubernetes v1.ResourceList -} - -// ResolvedPodUID is a pod UID which has been translated/resolved to the representation known to kubelets. -type ResolvedPodUID types.UID - -// MirrorPodUID is a pod UID for a mirror pod. -type MirrorPodUID types.UID diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go deleted file mode 100644 index b6a1cc38c81..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build darwin -// +build darwin - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// GetBootTime returns the time at which the machine was started, truncated to the nearest second -func GetBootTime() (time.Time, error) { - output, err := unix.SysctlRaw("kern.boottime") - if err != nil { - return time.Time{}, err - } - var timeval syscall.Timeval - if len(output) != int(unsafe.Sizeof(timeval)) { - return time.Time{}, fmt.Errorf("unexpected output when calling syscall kern.bootime. 
Expected len(output) to be %v, but got %v", - int(unsafe.Sizeof(timeval)), len(output)) - } - timeval = *(*syscall.Timeval)(unsafe.Pointer(&output[0])) - sec, nsec := timeval.Unix() - return time.Unix(sec, nsec).Truncate(time.Second), nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_freebsd.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_freebsd.go deleted file mode 100644 index 7d605d7611c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_freebsd.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build freebsd -// +build freebsd - -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "time" - - "golang.org/x/sys/unix" - "unsafe" -) - -// GetBootTime returns the time at which the machine was started, truncated to the nearest second -func GetBootTime() (time.Time, error) { - currentTime := time.Now() - ts := &unix.Timeval{} - _, _, e1 := unix.Syscall(uintptr(unix.SYS_CLOCK_GETTIME), uintptr(unix.CLOCK_UPTIME), uintptr(unsafe.Pointer(ts)), 0) - if e1 != 0 { - return time.Time{}, fmt.Errorf("error getting system uptime") - } - - return currentTime.Add(-time.Duration(ts.Sec) * time.Second).Truncate(time.Second), nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go deleted file mode 100644 index 935b3d80341..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go +++ /dev/null @@ -1,74 +0,0 @@ -//go:build linux -// +build linux - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "os" - "strconv" - "strings" - "time" - - "golang.org/x/sys/unix" - "k8s.io/klog/v2" -) - -// GetBootTime returns the time at which the machine was started, truncated to the nearest second. -// It uses /proc/stat first, which is more accurate, and falls back to the less accurate -// unix.Sysinfo if /proc/stat failed. -func GetBootTime() (time.Time, error) { - bootTime, err := getBootTimeWithProcStat() - if err != nil { - klog.InfoS("Failed to get boot time from /proc/uptime. 
Will retry with unix.Sysinfo.", "error", err) - return getBootTimeWithSysinfo() - } - return bootTime, nil -} - -func getBootTimeWithProcStat() (time.Time, error) { - raw, err := os.ReadFile("/proc/stat") - if err != nil { - return time.Time{}, fmt.Errorf("error getting boot time: %w", err) - } - rawFields := strings.Fields(string(raw)) - for i, v := range rawFields { - if v == "btime" { - if len(rawFields) > i+1 { - sec, err := strconv.ParseInt(rawFields[i+1], 10, 64) - if err != nil { - return time.Time{}, fmt.Errorf("error parsing boot time %s: %w", rawFields[i+1], err) - } - return time.Unix(sec, 0), nil - } - break - } - } - - return time.Time{}, fmt.Errorf("can not find btime from /proc/stat: %s", raw) -} - -func getBootTimeWithSysinfo() (time.Time, error) { - currentTime := time.Now() - var info unix.Sysinfo_t - if err := unix.Sysinfo(&info); err != nil { - return time.Time{}, fmt.Errorf("error getting system uptime: %w", err) - } - return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go deleted file mode 100644 index 65fc8cf54a5..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package format - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" -) - -// Pod returns a string representing a pod in a consistent human readable format, -// with pod UID as part of the string. -func Pod(pod *v1.Pod) string { - if pod == nil { - return "" - } - return PodDesc(pod.Name, pod.Namespace, pod.UID) -} - -// PodDesc returns a string representing a pod in a consistent human readable format, -// with pod UID as part of the string. -func PodDesc(podName, podNamespace string, podUID types.UID) string { - // Use underscore as the delimiter because it is not allowed in pod name - // (DNS subdomain format), while allowed in the container name format. - return fmt.Sprintf("%s_%s(%s)", podName, podNamespace, podUID) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/node_startup_latency_tracker.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/node_startup_latency_tracker.go deleted file mode 100644 index 815e4e81eaf..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/node_startup_latency_tracker.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "sync" - "time" - - "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/utils/clock" -) - -type NodeStartupLatencyTracker interface { - // This function may be called across Kubelet restart. - RecordAttemptRegisterNode() - // This function should not be called across Kubelet restart. - RecordRegisteredNewNode() - // This function may be called across Kubelet restart. - RecordNodeReady() -} - -type basicNodeStartupLatencyTracker struct { - lock sync.Mutex - - bootTime time.Time - kubeletStartTime time.Time - firstRegistrationAttemptTime time.Time - firstRegisteredNewNodeTime time.Time - firstNodeReadyTime time.Time - - // For testability - clock clock.Clock -} - -func NewNodeStartupLatencyTracker() NodeStartupLatencyTracker { - bootTime, err := GetBootTime() - if err != nil { - bootTime = time.Time{} - } - return &basicNodeStartupLatencyTracker{ - bootTime: bootTime, - kubeletStartTime: time.Now(), - clock: clock.RealClock{}, - } -} - -func (n *basicNodeStartupLatencyTracker) RecordAttemptRegisterNode() { - n.lock.Lock() - defer n.lock.Unlock() - - if !n.firstRegistrationAttemptTime.IsZero() { - return - } - - n.firstRegistrationAttemptTime = n.clock.Now() -} - -func (n *basicNodeStartupLatencyTracker) RecordRegisteredNewNode() { - n.lock.Lock() - defer n.lock.Unlock() - - if n.firstRegistrationAttemptTime.IsZero() || !n.firstRegisteredNewNodeTime.IsZero() { - return - } - - n.firstRegisteredNewNodeTime = n.clock.Now() - - if !n.bootTime.IsZero() { - metrics.NodeStartupPreKubeletDuration.Set(n.kubeletStartTime.Sub(n.bootTime).Seconds()) - } - metrics.NodeStartupPreRegistrationDuration.Set(n.firstRegistrationAttemptTime.Sub(n.kubeletStartTime).Seconds()) - metrics.NodeStartupRegistrationDuration.Set(n.firstRegisteredNewNodeTime.Sub(n.firstRegistrationAttemptTime).Seconds()) -} - -func (n *basicNodeStartupLatencyTracker) RecordNodeReady() { - n.lock.Lock() - defer n.lock.Unlock() - - if n.firstRegisteredNewNodeTime.IsZero() || !n.firstNodeReadyTime.IsZero() { - return - } - - n.firstNodeReadyTime = n.clock.Now() - - metrics.NodeStartupPostRegistrationDuration.Set(n.firstNodeReadyTime.Sub(n.firstRegisteredNewNodeTime).Seconds()) - if !n.bootTime.IsZero() { - metrics.NodeStartupDuration.Set(n.firstNodeReadyTime.Sub(n.bootTime).Seconds()) - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/nodelease.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/nodelease.go deleted file mode 100644 index 9e1b00b7624..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/nodelease.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "context" - - coordinationv1 "k8s.io/api/coordination/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - - "k8s.io/klog/v2" -) - -// SetNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets -// a node OwnerReference to the given lease object -func SetNodeOwnerFunc(c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error { - return func(lease *coordinationv1.Lease) error { - // Setting owner reference needs node's UID. Note that it is different from - // kubelet.nodeRef.UID. When lease is initially created, it is possible that - // the connection between master and node is not ready yet. So try to set - // owner reference every time when renewing the lease, until successful. - if len(lease.OwnerReferences) == 0 { - if node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}); err == nil { - lease.OwnerReferences = []metav1.OwnerReference{ - { - APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version, - Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind, - Name: nodeName, - UID: node.UID, - }, - } - } else { - klog.ErrorS(err, "Failed to get node when trying to set owner ref to the node lease", "node", klog.KRef("", nodeName)) - return err - } - } - return nil - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/pod_startup_latency_tracker.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/pod_startup_latency_tracker.go deleted file mode 100644 index 3ab3d2aaae5..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/pod_startup_latency_tracker.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/metrics" - "k8s.io/utils/clock" -) - -// PodStartupLatencyTracker records key moments for startup latency calculation, -// e.g. image pulling or pod observed running on watch. 
-type PodStartupLatencyTracker interface { - ObservedPodOnWatch(pod *v1.Pod, when time.Time) - RecordImageStartedPulling(podUID types.UID) - RecordImageFinishedPulling(podUID types.UID) - RecordStatusUpdated(pod *v1.Pod) - DeletePodStartupState(podUID types.UID) -} - -type basicPodStartupLatencyTracker struct { - // protect against concurrent read and write on pods map - lock sync.Mutex - pods map[types.UID]*perPodState - // metrics for the first network pod only - firstNetworkPodSeen bool - // For testability - clock clock.Clock -} - -type perPodState struct { - firstStartedPulling time.Time - lastFinishedPulling time.Time - // first time, when pod status changed into Running - observedRunningTime time.Time - // log, if pod latency was already Observed - metricRecorded bool -} - -// NewPodStartupLatencyTracker creates an instance of PodStartupLatencyTracker -func NewPodStartupLatencyTracker() PodStartupLatencyTracker { - return &basicPodStartupLatencyTracker{ - pods: map[types.UID]*perPodState{}, - clock: clock.RealClock{}, - } -} - -func (p *basicPodStartupLatencyTracker) ObservedPodOnWatch(pod *v1.Pod, when time.Time) { - p.lock.Lock() - defer p.lock.Unlock() - - // if the pod is terminal, we do not have to track it anymore for startup - if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded { - delete(p.pods, pod.UID) - return - } - - state := p.pods[pod.UID] - if state == nil { - // create a new record for pod, only if it was not yet acknowledged by the Kubelet - // this is required, as we want to log metric only for those pods, that where scheduled - // after Kubelet started - if pod.Status.StartTime.IsZero() { - p.pods[pod.UID] = &perPodState{} - } - - return - } - - if state.observedRunningTime.IsZero() { - // skip, pod didn't start yet - return - } - - if state.metricRecorded { - // skip, pod's latency already recorded - return - } - - if hasPodStartedSLO(pod) { - podStartingDuration := when.Sub(pod.CreationTimestamp.Time) - imagePullingDuration := state.lastFinishedPulling.Sub(state.firstStartedPulling) - podStartSLOduration := (podStartingDuration - imagePullingDuration).Seconds() - - klog.InfoS("Observed pod startup duration", - "pod", klog.KObj(pod), - "podStartSLOduration", podStartSLOduration, - "podStartE2EDuration", podStartingDuration, - "podCreationTimestamp", pod.CreationTimestamp.Time, - "firstStartedPulling", state.firstStartedPulling, - "lastFinishedPulling", state.lastFinishedPulling, - "observedRunningTime", state.observedRunningTime, - "watchObservedRunningTime", when) - - metrics.PodStartSLIDuration.WithLabelValues().Observe(podStartSLOduration) - metrics.PodStartTotalDuration.WithLabelValues().Observe(podStartingDuration.Seconds()) - state.metricRecorded = true - // if is the first Pod with network track the start values - // these metrics will help to identify problems with the CNI plugin - if !pod.Spec.HostNetwork && !p.firstNetworkPodSeen { - metrics.FirstNetworkPodStartSLIDuration.Set(podStartSLOduration) - p.firstNetworkPodSeen = true - } - } -} - -func (p *basicPodStartupLatencyTracker) RecordImageStartedPulling(podUID types.UID) { - p.lock.Lock() - defer p.lock.Unlock() - - state := p.pods[podUID] - if state == nil { - return - } - - if state.firstStartedPulling.IsZero() { - state.firstStartedPulling = p.clock.Now() - } -} - -func (p *basicPodStartupLatencyTracker) RecordImageFinishedPulling(podUID types.UID) { - p.lock.Lock() - defer p.lock.Unlock() - - state := p.pods[podUID] - if state == nil { - return - } - - 
state.lastFinishedPulling = p.clock.Now() // Now is always grater than values from the past. -} - -func (p *basicPodStartupLatencyTracker) RecordStatusUpdated(pod *v1.Pod) { - p.lock.Lock() - defer p.lock.Unlock() - - state := p.pods[pod.UID] - if state == nil { - return - } - - if state.metricRecorded { - // skip, pod latency already recorded - return - } - - if !state.observedRunningTime.IsZero() { - // skip, pod already started - return - } - - if hasPodStartedSLO(pod) { - klog.V(3).InfoS("Mark when the pod was running for the first time", "pod", klog.KObj(pod), "rv", pod.ResourceVersion) - state.observedRunningTime = p.clock.Now() - } -} - -// hasPodStartedSLO, check if for given pod, each container has been started at least once -// -// This should reflect "Pod startup latency SLI" definition -// ref: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/pod_startup_latency.md -func hasPodStartedSLO(pod *v1.Pod) bool { - for _, cs := range pod.Status.ContainerStatuses { - if cs.State.Running == nil || cs.State.Running.StartedAt.IsZero() { - return false - } - } - - return true -} - -func (p *basicPodStartupLatencyTracker) DeletePodStartupState(podUID types.UID) { - p.lock.Lock() - defer p.lock.Unlock() - - delete(p.pods, podUID) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/doc.go deleted file mode 100644 index 04f0129b293..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package store hosts a Store interface and its implementations. -package store diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/filestore.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/filestore.go deleted file mode 100644 index 17c313602ad..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/filestore.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package store - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - utilfs "k8s.io/kubernetes/pkg/util/filesystem" -) - -const ( - // Name prefix for the temporary files. - tmpPrefix = "." -) - -// FileStore is an implementation of the Store interface which stores data in files. -type FileStore struct { - // Absolute path to the base directory for storing data files. - directoryPath string - - // filesystem to use. 
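// A note on hasPodStartedSLO above: it is the all-of predicate "every
// container has a non-zero Running.StartedAt". Generalized with Go generics
// (a sketch, not part of this patch):

func all[T any](items []T, ok func(T) bool) bool {
	for _, item := range items {
		if !ok(item) {
			return false
		}
	}
	return true
}

// started := all(pod.Status.ContainerStatuses, func(cs v1.ContainerStatus) bool {
//	return cs.State.Running != nil && !cs.State.Running.StartedAt.IsZero()
// })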
- filesystem utilfs.Filesystem -} - -// NewFileStore returns an instance of FileStore. -func NewFileStore(path string, fs utilfs.Filesystem) (Store, error) { - if err := fs.MkdirAll(path, 0755); err != nil { - return nil, err - } - return &FileStore{directoryPath: path, filesystem: fs}, nil -} - -// Write writes the given data to a file named key. -func (f *FileStore) Write(key string, data []byte) error { - if err := ValidateKey(key); err != nil { - return err - } - if err := f.filesystem.MkdirAll(f.directoryPath, 0755); err != nil { - return err - } - - return writeFile(f.filesystem, f.getPathByKey(key), data) -} - -// Read reads the data from the file named key. -func (f *FileStore) Read(key string) ([]byte, error) { - if err := ValidateKey(key); err != nil { - return nil, err - } - bytes, err := f.filesystem.ReadFile(f.getPathByKey(key)) - if os.IsNotExist(err) { - return bytes, ErrKeyNotFound - } - return bytes, err -} - -// Delete deletes the key file. -func (f *FileStore) Delete(key string) error { - if err := ValidateKey(key); err != nil { - return err - } - return removePath(f.filesystem, f.getPathByKey(key)) -} - -// List returns all keys in the store. -func (f *FileStore) List() ([]string, error) { - keys := make([]string, 0) - files, err := f.filesystem.ReadDir(f.directoryPath) - if err != nil { - return keys, err - } - for _, f := range files { - if !strings.HasPrefix(f.Name(), tmpPrefix) { - keys = append(keys, f.Name()) - } - } - return keys, nil -} - -// getPathByKey returns the full path of the file for the key. -func (f *FileStore) getPathByKey(key string) string { - return filepath.Join(f.directoryPath, key) -} - -// writeFile writes data to path in a single transaction. -func writeFile(fs utilfs.Filesystem, path string, data []byte) (retErr error) { - // Create a temporary file in the base directory of `path` with a prefix. - tmpFile, err := fs.TempFile(filepath.Dir(path), tmpPrefix) - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - shouldClose := true - - defer func() { - // Close the file. - if shouldClose { - if err := tmpFile.Close(); err != nil { - if retErr == nil { - retErr = fmt.Errorf("close error: %v", err) - } else { - retErr = fmt.Errorf("failed to close temp file after error %v; close error: %v", retErr, err) - } - } - } - - // Clean up the temp file on error. - if retErr != nil && tmpPath != "" { - if err := removePath(fs, tmpPath); err != nil { - retErr = fmt.Errorf("failed to remove the temporary file (%q) after error %v; remove error: %v", tmpPath, retErr, err) - } - } - }() - - // Write data. - if _, err := tmpFile.Write(data); err != nil { - return err - } - - // Sync file. - if err := tmpFile.Sync(); err != nil { - return err - } - - // Closing the file before renaming. - err = tmpFile.Close() - shouldClose = false - if err != nil { - return err - } - - return fs.Rename(tmpPath, path) -} - -func removePath(fs utilfs.Filesystem, path string) error { - if err := fs.Remove(path); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/store.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/store.go deleted file mode 100644 index e797fd812b4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/store.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package store - -import ( - "fmt" - "regexp" -) - -const ( - keyMaxLength = 250 - - keyCharFmt string = "[A-Za-z0-9]" - keyExtCharFmt string = "[-A-Za-z0-9_.]" - qualifiedKeyFmt string = "(" + keyCharFmt + keyExtCharFmt + "*)?" + keyCharFmt -) - -var ( - // Key must consist of alphanumeric characters, '-', '_' or '.', and must start - // and end with an alphanumeric character. - keyRegex = regexp.MustCompile("^" + qualifiedKeyFmt + "$") - - // ErrKeyNotFound is the error returned if key is not found in Store. - ErrKeyNotFound = fmt.Errorf("key is not found") -) - -// Store provides the interface for storing keyed data. -// Store must be thread-safe -type Store interface { - // key must contain one or more characters in [A-Za-z0-9] - // Write writes data with key. - Write(key string, data []byte) error - // Read retrieves data with key - // Read must return ErrKeyNotFound if key is not found. - Read(key string) ([]byte, error) - // Delete deletes data by key - // Delete must not return error if key does not exist - Delete(key string) error - // List lists all existing keys. - List() ([]string, error) -} - -// ValidateKey returns an error if the given key does not meet the requirement -// of the key format and length. -func ValidateKey(key string) error { - if len(key) <= keyMaxLength && keyRegex.MatchString(key) { - return nil - } - return fmt.Errorf("invalid key: %q", key) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/swap/swap_util.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/swap/swap_util.go deleted file mode 100644 index 20f0e45ef5f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/swap/swap_util.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package swap - -import ( - "bytes" - "errors" - "os" - sysruntime "runtime" - "strings" - "sync" - - inuserns "github.com/moby/sys/userns" - "k8s.io/apimachinery/pkg/util/version" - "k8s.io/klog/v2" - utilkernel "k8s.io/kubernetes/pkg/util/kernel" - "k8s.io/mount-utils" -) - -var ( - tmpfsNoswapOptionSupported bool - tmpfsNoswapOptionAvailabilityOnce sync.Once - swapOn bool - swapOnErr error - swapOnOnce sync.Once -) - -const TmpfsNoswapOption = "noswap" - -func IsTmpfsNoswapOptionSupported(mounter mount.Interface, mountPath string) bool { - isTmpfsNoswapOptionSupportedHelper := func() bool { - if sysruntime.GOOS == "windows" { - return false - } - - if inuserns.RunningInUserNS() { - // Turning off swap in unprivileged tmpfs mounts unsupported - // https://github.com/torvalds/linux/blob/v6.8/mm/shmem.c#L4004-L4011 - // https://github.com/kubernetes/kubernetes/issues/125137 - klog.InfoS("Running under a user namespace - tmpfs noswap is not supported") - return false - } - - kernelVersion, err := utilkernel.GetVersion() - if err != nil { - klog.ErrorS(err, "cannot determine kernel version, unable to determine is tmpfs noswap is supported") - return false - } - - if kernelVersion.AtLeast(version.MustParseGeneric(utilkernel.TmpfsNoswapSupportKernelVersion)) { - return true - } - - if mountPath == "" { - klog.ErrorS(errors.New("mount path is empty, falling back to /tmp"), "") - } - - mountPath, err = os.MkdirTemp(mountPath, "tmpfs-noswap-test-") - if err != nil { - klog.InfoS("error creating dir to test if tmpfs noswap is enabled. Assuming not supported", "mount path", mountPath, "error", err) - return false - } - - defer func() { - err = os.RemoveAll(mountPath) - if err != nil { - klog.ErrorS(err, "error removing test tmpfs dir", "mount path", mountPath) - } - }() - - err = mounter.MountSensitiveWithoutSystemd("tmpfs", mountPath, "tmpfs", []string{TmpfsNoswapOption}, nil) - if err != nil { - klog.InfoS("error mounting tmpfs with the noswap option. Assuming not supported", "error", err) - return false - } - - err = mounter.Unmount(mountPath) - if err != nil { - klog.ErrorS(err, "error unmounting test tmpfs dir", "mount path", mountPath) - } - - return true - } - - tmpfsNoswapOptionAvailabilityOnce.Do(func() { - tmpfsNoswapOptionSupported = isTmpfsNoswapOptionSupportedHelper() - }) - - return tmpfsNoswapOptionSupported -} - -// gets /proc/swaps's content as an input, returns true if swap is enabled. -func isSwapOnAccordingToProcSwaps(procSwapsContent []byte) bool { - procSwapsContent = bytes.TrimSpace(procSwapsContent) // extra trailing \n - procSwapsStr := string(procSwapsContent) - procSwapsLines := strings.Split(procSwapsStr, "\n") - - // If there is more than one line (table headers) in /proc/swaps then swap is enabled - isSwapOn := len(procSwapsLines) > 1 - if isSwapOn { - klog.InfoS("Swap is on", "/proc/swaps contents", procSwapsStr) - } - - return isSwapOn -} - -// IsSwapOn detects whether swap in enabled on the system by inspecting -// /proc/swaps. If the file does not exist, an os.NotFound error will be returned. -// If running on windows, swap is assumed to always be false. 
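The heuristic in isSwapOnAccordingToProcSwaps above amounts to "more than the header line means swap is active"; a standalone sketch with made-up /proc/swaps content:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up /proc/swaps content: the table header plus one active device.
	procSwaps := "Filename\tType\tSize\tUsed\tPriority\n" +
		"/dev/dm-1\tpartition\t8388604\t0\t-2\n"

	lines := strings.Split(strings.TrimSpace(procSwaps), "\n")
	// Anything beyond the header row means at least one swap device is enabled.
	fmt.Println("swap on:", len(lines) > 1) // swap on: true
}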
-func IsSwapOn() (bool, error) { - isSwapOnHelper := func() (bool, error) { - if sysruntime.GOOS == "windows" { - return false, nil - } - - const swapFilePath = "/proc/swaps" - procSwapsContent, err := os.ReadFile(swapFilePath) - if err != nil { - if os.IsNotExist(err) { - klog.InfoS("File does not exist, assuming that swap is disabled", "path", swapFilePath) - return false, nil - } - - return false, err - } - - return isSwapOnAccordingToProcSwaps(procSwapsContent), nil - } - - swapOnOnce.Do(func() { - swapOn, swapOnErr = isSwapOnHelper() - }) - - return swapOn, swapOnErr -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go deleted file mode 100644 index 79473a18184..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/util/filesystem" -) - -// FromApiserverCache modifies so that the GET request will -// be served from apiserver cache instead of from etcd. -func FromApiserverCache(opts *metav1.GetOptions) { - opts.ResourceVersion = "0" -} - -var IsUnixDomainSocket = filesystem.IsUnixDomainSocket - -// GetNodenameForKernel gets hostname value to set in the hostname field (the nodename field of struct utsname) of the pod. -func GetNodenameForKernel(hostname string, hostDomainName string, setHostnameAsFQDN *bool) (string, error) { - kernelHostname := hostname - // FQDN has to be 64 chars to fit in the Linux nodename kernel field (specification 64 chars and the null terminating char). - const fqdnMaxLen = 64 - if len(hostDomainName) > 0 && setHostnameAsFQDN != nil && *setHostnameAsFQDN { - fqdn := fmt.Sprintf("%s.%s", hostname, hostDomainName) - // FQDN has to be shorter than hostnameMaxLen characters. - if len(fqdn) > fqdnMaxLen { - return "", fmt.Errorf("failed to construct FQDN from pod hostname and cluster domain, FQDN %s is too long (%d characters is the max, %d characters requested)", fqdn, fqdnMaxLen, len(fqdn)) - } - kernelHostname = fqdn - } - return kernelHostname, nil -} - -// GetContainerByIndex validates and extracts the container at index "idx" from -// "containers" with respect to "statuses". -// It returns true if the container is valid, else returns false. 
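GetNodenameForKernel above enforces the 64-character capacity of the Linux utsname nodename field; a self-contained sketch of that rule (hypothetical re-implementation for illustration only):

package main

import "fmt"

// nodenameForKernel sketches the deleted GetNodenameForKernel: when
// setHostnameAsFQDN is requested, hostname.domain must fit into the
// 64-character utsname nodename field.
func nodenameForKernel(hostname, domain string, setHostnameAsFQDN bool) (string, error) {
	const fqdnMaxLen = 64
	if domain == "" || !setHostnameAsFQDN {
		return hostname, nil
	}
	fqdn := hostname + "." + domain
	if len(fqdn) > fqdnMaxLen {
		return "", fmt.Errorf("FQDN %q is too long (%d characters, max %d)", fqdn, len(fqdn), fqdnMaxLen)
	}
	return fqdn, nil
}

func main() {
	fmt.Println(nodenameForKernel("web-0", "prod.svc.cluster.local", true))
}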
-func GetContainerByIndex(containers []v1.Container, statuses []v1.ContainerStatus, idx int) (v1.Container, bool) { - if idx < 0 || idx >= len(containers) || idx >= len(statuses) { - return v1.Container{}, false - } - if statuses[idx].Name != containers[idx].Name { - return v1.Container{}, false - } - return containers[idx], true -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_linux.go deleted file mode 100644 index 98efbb46fe2..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build linux -// +build linux - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - libcontainercgroups "github.com/opencontainers/cgroups" -) - -// IsCgroup2UnifiedMode returns true if the cgroup v2 unified mode is enabled -func IsCgroup2UnifiedMode() bool { - return libcontainercgroups.IsCgroup2UnifiedMode() -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_others.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_others.go deleted file mode 100644 index e2e1c71bac6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_others.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !linux && !windows -// +build !linux,!windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -// IsCgroup2UnifiedMode is a no-op for other OSes. -func IsCgroup2UnifiedMode() bool { - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go deleted file mode 100644 index 0664b6a4248..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build freebsd || linux || darwin -// +build freebsd linux darwin - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "net/url" - "path/filepath" -) - -const ( - // unixProtocol is the network protocol of unix socket. - unixProtocol = "unix" -) - -// LocalEndpoint returns the full path to a unix socket at the given endpoint -func LocalEndpoint(path, file string) (string, error) { - u := url.URL{ - Scheme: unixProtocol, - Path: path, - } - return filepath.Join(u.String(), file+".sock"), nil -} - -// NormalizePath is a no-op for Linux for now -func NormalizePath(path string) string { - return path -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go deleted file mode 100644 index e95a7c583a8..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build !freebsd && !linux && !windows && !darwin -// +build !freebsd,!linux,!windows,!darwin - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "time" -) - -// LockAndCheckSubPath empty implementation -func LockAndCheckSubPath(volumePath, subPath string) ([]uintptr, error) { - return []uintptr{}, nil -} - -// UnlockPath empty implementation -func UnlockPath(fileHandles []uintptr) { -} - -// LocalEndpoint empty implementation -func LocalEndpoint(path, file string) (string, error) { - return "", fmt.Errorf("LocalEndpoints are unsupported in this build") -} - -// GetBootTime empty implementation -func GetBootTime() (time.Time, error) { - return time.Time{}, fmt.Errorf("GetBootTime is unsupported in this build") -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go deleted file mode 100644 index c944a7d22f2..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go +++ /dev/null @@ -1,80 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "path/filepath" - "strings" - "syscall" - "time" -) - -const npipeProtocol = "npipe" - -// LocalEndpoint returns the full path to a named pipe at the given endpoint - unlike on unix, we can't use sockets. -func LocalEndpoint(path, file string) (string, error) { - // extract the podresources config name from the path. 
We only need this on windows because the preferred layout of pipes, - // this is why we have the extra logic in here instead of changing the function signature. Join the file to make sure the - // last path component is a file, so the operation chain works.. - podResourcesDir := filepath.Base(filepath.Dir(filepath.Join(path, file))) - if podResourcesDir == "" { - // should not happen because the user can configure a root directory, and we expected a subdirectory inside - // the user supplied root directory named like "pod-resources" or so. - return "", fmt.Errorf("cannot infer the podresources directory from path %q", path) - } - // windows pipes are expected to use forward slashes: https://learn.microsoft.com/windows/win32/ipc/pipe-names - // so using `url` like we do on unix gives us unclear benefits - see https://github.com/kubernetes/kubernetes/issues/78628 - // So we just construct the path from scratch. - // Format: \\ServerName\pipe\PipeName - // Where where ServerName is either the name of a remote computer or a period, to specify the local computer. - // We only consider PipeName as regular windows path, while the pipe path components are fixed, hence we use constants. - serverPart := `\\.` - pipePart := "pipe" - pipeName := "kubelet-" + podResourcesDir - return npipeProtocol + "://" + filepath.Join(serverPart, pipePart, pipeName), nil -} - -var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64") - -// GetBootTime returns the time at which the machine was started, truncated to the nearest second -func GetBootTime() (time.Time, error) { - currentTime := time.Now() - output, _, err := tickCount.Call() - if errno, ok := err.(syscall.Errno); !ok || errno != 0 { - return time.Time{}, err - } - return currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil -} - -// NormalizePath converts FS paths returned by certain go frameworks (like fsnotify) -// to native Windows paths that can be passed to Windows specific code -func NormalizePath(path string) string { - path = strings.ReplaceAll(path, "/", "\\") - if strings.HasPrefix(path, "\\") { - path = "c:" + path - } - return path -} - -// IsCgroup2UnifiedMode is a no-op for Windows for now -func IsCgroup2UnifiedMode() bool { - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/cpu_topology.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/cpu_topology.go deleted file mode 100644 index c6a7eab5c1c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/cpu_topology.go +++ /dev/null @@ -1,264 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
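The pipe-name construction in the Windows LocalEndpoint above yields paths of the form \\.\pipe\kubelet-<dir>; a sketch of the same string assembly (assuming it runs on Windows, where filepath.Join emits backslashes; the directory name is made up):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Format: \\ServerName\pipe\PipeName, where "." means the local machine.
	serverPart := `\\.`
	pipePart := "pipe"
	pipeName := "kubelet-" + "pod-resources" // last path component of the endpoint dir

	// On Windows this prints: npipe://\\.\pipe\kubelet-pod-resources
	fmt.Println("npipe://" + filepath.Join(serverPart, pipePart, pipeName))
}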
-*/ - -package winstats - -import ( - "fmt" - "syscall" - "unsafe" - - cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/klog/v2" -) - -var ( - procGetLogicalProcessorInformationEx = modkernel32.NewProc("GetLogicalProcessorInformationEx") - getNumaAvailableMemoryNodeEx = modkernel32.NewProc("GetNumaAvailableMemoryNodeEx") - procGetNumaNodeProcessorMaskEx = modkernel32.NewProc("GetNumaNodeProcessorMaskEx") -) - -type relationType int - -const ( - relationProcessorCore relationType = iota - relationNumaNode - relationCache - relationProcessorPackage - relationGroup - relationProcessorDie - relationNumaNodeEx - relationProcessorModule - relationAll = 0xffff -) - -type systemLogicalProcessorInformationEx struct { - Relationship uint32 - Size uint32 - data interface{} -} - -type processorRelationship struct { - Flags byte - EfficiencyClass byte - Reserved [20]byte - GroupCount uint16 - // groupMasks is an []GroupAffinity. In c++ this is a union of either one or many GroupAffinity based on GroupCount - GroupMasks interface{} -} - -// GroupAffinity represents the processor group affinity of cpus -// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-group_affinity -type GroupAffinity struct { - Mask uint64 - Group uint16 - Reserved [3]uint16 -} - -// MaskString returns the affinity mask as a string of 0s and 1s -func (a GroupAffinity) MaskString() string { - return fmt.Sprintf("%064b", a.Mask) -} - -// Processors returns a list of processors ids that are part of the affinity mask -// Windows doesn't track processors by ID but kubelet converts them to a number -func (a GroupAffinity) Processors() []int { - processors := []int{} - for i := 0; i < 64; i++ { - if a.Mask&(1< len(buffer) { - return 0, 0, nil, fmt.Errorf("remaining buffer too small while reading windows processor relationship") - } - info := (*systemLogicalProcessorInformationEx)(unsafe.Pointer(&buffer[offset])) - // check one more time now that we know the size of the struct - if offset+int(info.Size) > len(buffer) { - return 0, 0, nil, fmt.Errorf("remaining buffer too small while reading windows processor relationship") - } - switch (relationType)(info.Relationship) { - case relationProcessorCore, relationProcessorPackage: - relationship := (*processorRelationship)(unsafe.Pointer(&info.data)) - groupMasks := make([]GroupAffinity, relationship.GroupCount) - for i := 0; i < int(relationship.GroupCount); i++ { - groupMasks[i] = *(*GroupAffinity)(unsafe.Pointer(uintptr(unsafe.Pointer(&relationship.GroupMasks)) + uintptr(i)*unsafe.Sizeof(GroupAffinity{}))) - } - - if relationProcessorCore == (relationType)(info.Relationship) { - numOfcores++ - } - - if relationProcessorPackage == (relationType)(info.Relationship) { - numofSockets++ - } - - //iterate over group masks and add each processor to the map - for _, groupMask := range groupMasks { - for _, processorId := range groupMask.Processors() { - p, ok := logicalProcessors[processorId] - if !ok { - p = &processor{} - logicalProcessors[processorId] = p - } - if relationProcessorCore == (relationType)(info.Relationship) { - p.CoreID = numOfcores - } - if relationProcessorPackage == (relationType)(info.Relationship) { - p.SocketID = numofSockets - } - } - } - - case relationNumaNode, relationNumaNodeEx: - numaNodeRelationship := (*numaNodeRelationship)(unsafe.Pointer(&info.data)) - groupMasks := make([]GroupAffinity, numaNodeRelationship.GroupCount) - for i := 0; i < int(numaNodeRelationship.GroupCount); i++ { - groupMasks[i] = 
*(*GroupAffinity)(unsafe.Pointer(uintptr(unsafe.Pointer(&numaNodeRelationship.GroupMasks)) + uintptr(i)*unsafe.Sizeof(GroupAffinity{}))) - } - - nodes = append(nodes, cadvisorapi.Node{Id: int(numaNodeRelationship.NodeNumber)}) - - for _, groupMask := range groupMasks { - for processorId := range groupMask.Processors() { - p, ok := logicalProcessors[processorId] - if !ok { - p = &processor{} - logicalProcessors[processorId] = p - } - p.NodeID = int(numaNodeRelationship.NodeNumber) - } - } - - default: - klog.V(4).Infof("Not using Windows CPU relationship type: %d", info.Relationship) - } - - // Move the offset to the next SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX struct - offset += int(info.Size) - } - - for processId, p := range logicalProcessors { - node := nodes[p.NodeID] - if node.Id != p.NodeID { - return 0, 0, nil, fmt.Errorf("node ID mismatch: %d != %d", node.Id, p.NodeID) - } - availableBytes := uint64(0) - r1, _, err := getNumaAvailableMemoryNodeEx.Call(uintptr(p.NodeID), uintptr(unsafe.Pointer(&availableBytes))) - if r1 == 0 { - return 0, 0, nil, fmt.Errorf("call to GetNumaAvailableMemoryNodeEx failed: %v", err) - } - node.Memory = availableBytes - node.AddThread(processId, p.CoreID) - ok, coreIdx := node.FindCore(p.CoreID) - if !ok { - return 0, 0, nil, fmt.Errorf("core not found: %d", p.CoreID) - } - node.Cores[coreIdx].SocketID = p.SocketID - nodes[p.NodeID] = node - } - - return numOfcores, numofSockets, nodes, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/doc.go deleted file mode 100644 index 9802d587b5a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package winstats provides a client to get node and pod level stats on windows -package winstats diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/network_stats.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/network_stats.go deleted file mode 100644 index eb7f4cd8c53..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/network_stats.go +++ /dev/null @@ -1,313 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
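Part of the GroupAffinity.Processors bit-scan earlier in this hunk was mangled in transit; the underlying idea is a plain scan of the 64-bit group mask, one bit per logical processor. A minimal sketch:

package main

import "fmt"

func main() {
	// A GroupAffinity mask is a 64-bit field with one bit per logical
	// processor in the group; bit i set means processor i is included.
	mask := uint64(0b1011) // processors 0, 1 and 3

	var processors []int
	for i := 0; i < 64; i++ {
		if mask&(1<<i) != 0 {
			processors = append(processors, i)
		}
	}
	fmt.Println(processors) // [0 1 3]
}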
-*/ - -package winstats - -import ( - "sync" - - cadvisorapi "github.com/google/cadvisor/info/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" -) - -const ( - packetsReceivedPerSecondQuery = "\\Network Adapter(*)\\Packets Received/sec" - packetsSentPerSecondQuery = "\\Network Adapter(*)\\Packets Sent/sec" - bytesReceivedPerSecondQuery = "\\Network Adapter(*)\\Bytes Received/sec" - bytesSentPerSecondQuery = "\\Network Adapter(*)\\Bytes Sent/sec" - packetsReceivedDiscardedQuery = "\\Network Adapter(*)\\Packets Received Discarded" - packetsReceivedErrorsQuery = "\\Network Adapter(*)\\Packets Received Errors" - packetsOutboundDiscardedQuery = "\\Network Adapter(*)\\Packets Outbound Discarded" - packetsOutboundErrorsQuery = "\\Network Adapter(*)\\Packets Outbound Errors" -) - -// networkCounter contains the counters for network adapters. -type networkCounter struct { - packetsReceivedPerSecondCounter perfCounter - packetsSentPerSecondCounter perfCounter - bytesReceivedPerSecondCounter perfCounter - bytesSentPerSecondCounter perfCounter - packetsReceivedDiscardedCounter perfCounter - packetsReceivedErrorsCounter perfCounter - packetsOutboundDiscardedCounter perfCounter - packetsOutboundErrorsCounter perfCounter - - mu sync.RWMutex - adapterStats map[string]cadvisorapi.InterfaceStats -} - -func newNetworkCounters() (*networkCounter, error) { - packetsReceivedPerSecondCounter, err := newPerfCounter(packetsReceivedPerSecondQuery) - if err != nil { - return nil, err - } - - packetsSentPerSecondCounter, err := newPerfCounter(packetsSentPerSecondQuery) - if err != nil { - return nil, err - } - - bytesReceivedPerSecondCounter, err := newPerfCounter(bytesReceivedPerSecondQuery) - if err != nil { - return nil, err - } - - bytesSentPerSecondCounter, err := newPerfCounter(bytesSentPerSecondQuery) - if err != nil { - return nil, err - } - - packetsReceivedDiscardedCounter, err := newPerfCounter(packetsReceivedDiscardedQuery) - if err != nil { - return nil, err - } - - packetsReceivedErrorsCounter, err := newPerfCounter(packetsReceivedErrorsQuery) - if err != nil { - return nil, err - } - - packetsOutboundDiscardedCounter, err := newPerfCounter(packetsOutboundDiscardedQuery) - if err != nil { - return nil, err - } - - packetsOutboundErrorsCounter, err := newPerfCounter(packetsOutboundErrorsQuery) - if err != nil { - return nil, err - } - - return &networkCounter{ - packetsReceivedPerSecondCounter: packetsReceivedPerSecondCounter, - packetsSentPerSecondCounter: packetsSentPerSecondCounter, - bytesReceivedPerSecondCounter: bytesReceivedPerSecondCounter, - bytesSentPerSecondCounter: bytesSentPerSecondCounter, - packetsReceivedDiscardedCounter: packetsReceivedDiscardedCounter, - packetsReceivedErrorsCounter: packetsReceivedErrorsCounter, - packetsOutboundDiscardedCounter: packetsOutboundDiscardedCounter, - packetsOutboundErrorsCounter: packetsOutboundErrorsCounter, - adapterStats: map[string]cadvisorapi.InterfaceStats{}, - }, nil -} - -func (n *networkCounter) getData() ([]cadvisorapi.InterfaceStats, error) { - packetsReceivedPerSecondData, err := n.packetsReceivedPerSecondCounter.getDataList() - if err != nil { - klog.ErrorS(err, "Unable to get packetsReceivedPerSecond perf counter data") - return nil, err - } - - packetsSentPerSecondData, err := n.packetsSentPerSecondCounter.getDataList() - if err != nil { - klog.ErrorS(err, "Unable to get packetsSentPerSecond perf counter data") - return nil, err - } - - bytesReceivedPerSecondData, err := n.bytesReceivedPerSecondCounter.getDataList() - if err != 
nil { - klog.ErrorS(err, "Unable to get bytesReceivedPerSecond perf counter data") - return nil, err - } - - bytesSentPerSecondData, err := n.bytesSentPerSecondCounter.getDataList() - if err != nil { - klog.ErrorS(err, "Unable to get bytesSentPerSecond perf counter data") - return nil, err - } - - packetsReceivedDiscardedData, err := n.packetsReceivedDiscardedCounter.getDataList() - if err != nil { - klog.ErrorS(err, "Unable to get packetsReceivedDiscarded perf counter data") - return nil, err - } - - packetsReceivedErrorsData, err := n.packetsReceivedErrorsCounter.getDataList() - if err != nil { - klog.ErrorS(err, "Unable to get packetsReceivedErrors perf counter data") - return nil, err - } - - packetsOutboundDiscardedData, err := n.packetsOutboundDiscardedCounter.getDataList() - if err != nil { - klog.ErrorS(err, "Unable to get packetsOutboundDiscarded perf counter data") - return nil, err - } - - packetsOutboundErrorsData, err := n.packetsOutboundErrorsCounter.getDataList() - if err != nil { - klog.ErrorS(err, "Unable to get packetsOutboundErrors perf counter data") - return nil, err - } - - n.mu.Lock() - defer n.mu.Unlock() - n.mergeCollectedData( - packetsReceivedPerSecondData, - packetsSentPerSecondData, - bytesReceivedPerSecondData, - bytesSentPerSecondData, - packetsReceivedDiscardedData, - packetsReceivedErrorsData, - packetsOutboundDiscardedData, - packetsOutboundErrorsData, - ) - return n.listInterfaceStats(), nil -} - -// mergeCollectedData merges the collected data into cache. It should be invoked under lock protected. -func (n *networkCounter) mergeCollectedData(packetsReceivedPerSecondData, - packetsSentPerSecondData, - bytesReceivedPerSecondData, - bytesSentPerSecondData, - packetsReceivedDiscardedData, - packetsReceivedErrorsData, - packetsOutboundDiscardedData, - packetsOutboundErrorsData map[string]uint64) { - adapters := sets.New[string]() - - // merge the collected data and list of adapters. - adapters.Insert(n.mergePacketsReceivedPerSecondData(packetsReceivedPerSecondData)...) - adapters.Insert(n.mergePacketsSentPerSecondData(packetsSentPerSecondData)...) - adapters.Insert(n.mergeBytesReceivedPerSecondData(bytesReceivedPerSecondData)...) - adapters.Insert(n.mergeBytesSentPerSecondData(bytesSentPerSecondData)...) - adapters.Insert(n.mergePacketsReceivedDiscardedData(packetsReceivedDiscardedData)...) - adapters.Insert(n.mergePacketsReceivedErrorsData(packetsReceivedErrorsData)...) - adapters.Insert(n.mergePacketsOutboundDiscardedData(packetsOutboundDiscardedData)...) - adapters.Insert(n.mergePacketsOutboundErrorsData(packetsOutboundErrorsData)...) - - // delete the cache for non-existing adapters. 
- for adapter := range n.adapterStats { - if !adapters.Has(adapter) { - delete(n.adapterStats, adapter) - } - } -} - -func (n *networkCounter) mergePacketsReceivedPerSecondData(packetsReceivedPerSecondData map[string]uint64) []string { - var adapters []string - for adapterName, value := range packetsReceivedPerSecondData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.RxPackets = newStat.RxPackets + value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) mergePacketsSentPerSecondData(packetsSentPerSecondData map[string]uint64) []string { - var adapters []string - for adapterName, value := range packetsSentPerSecondData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.TxPackets = newStat.TxPackets + value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) mergeBytesReceivedPerSecondData(bytesReceivedPerSecondData map[string]uint64) []string { - var adapters []string - for adapterName, value := range bytesReceivedPerSecondData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.RxBytes = newStat.RxBytes + value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) mergeBytesSentPerSecondData(bytesSentPerSecondData map[string]uint64) []string { - var adapters []string - for adapterName, value := range bytesSentPerSecondData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.TxBytes = newStat.TxBytes + value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) mergePacketsReceivedDiscardedData(packetsReceivedDiscardedData map[string]uint64) []string { - var adapters []string - for adapterName, value := range packetsReceivedDiscardedData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.RxDropped = value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) mergePacketsReceivedErrorsData(packetsReceivedErrorsData map[string]uint64) []string { - var adapters []string - for adapterName, value := range packetsReceivedErrorsData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.RxErrors = value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) mergePacketsOutboundDiscardedData(packetsOutboundDiscardedData map[string]uint64) []string { - var adapters []string - for adapterName, value := range packetsOutboundDiscardedData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.TxDropped = value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) mergePacketsOutboundErrorsData(packetsOutboundErrorsData map[string]uint64) []string { - var adapters []string - for adapterName, value := range packetsOutboundErrorsData { - adapters = append(adapters, adapterName) - newStat := n.adapterStats[adapterName] - newStat.Name = adapterName - newStat.TxErrors = value - n.adapterStats[adapterName] = newStat - } - - return adapters -} - -func (n *networkCounter) listInterfaceStats() []cadvisorapi.InterfaceStats { 
- stats := make([]cadvisorapi.InterfaceStats, 0, len(n.adapterStats)) - for _, stat := range n.adapterStats { - stats = append(stats, stat) - } - return stats -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats_windows.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats_windows.go deleted file mode 100644 index df5fb41ef68..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats_windows.go +++ /dev/null @@ -1,358 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winstats - -import ( - "os" - "runtime" - "strconv" - "strings" - "sync" - "syscall" - "time" - "unsafe" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - kubefeatures "k8s.io/kubernetes/pkg/features" - - cadvisorapi "github.com/google/cadvisor/info/v1" - "github.com/pkg/errors" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" -) - -const ( - bootIdRegistry = `SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management\PrefetchParameters` - bootIdKey = `BootId` -) - -// MemoryStatusEx is the same as Windows structure MEMORYSTATUSEX -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type MemoryStatusEx struct { - Length uint32 - MemoryLoad uint32 - TotalPhys uint64 - AvailPhys uint64 - TotalPageFile uint64 - AvailPageFile uint64 - TotalVirtual uint64 - AvailVirtual uint64 - AvailExtendedVirtual uint64 -} - -// PerformanceInfo is the same as Windows structure PERFORMANCE_INFORMATION -// https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-performance_information -type PerformanceInformation struct { - cb uint32 - CommitTotalPages uint64 - CommitLimitPages uint64 - CommitPeakPages uint64 - PhysicalTotalPages uint64 - PhysicalAvailablePages uint64 - SystemCachePages uint64 - KernelTotalPages uint64 - KernelPagesPages uint64 - KernelNonpagedPages uint64 - PageSize uint64 - HandleCount uint32 - ProcessCount uint32 - ThreadCount uint32 -} - -var ( - // kernel32.dll system calls - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") - procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") - // psapi.dll system calls - modpsapi = windows.NewLazySystemDLL("psapi.dll") - procGetPerformanceInfo = modpsapi.NewProc("GetPerformanceInfo") -) - -const allProcessorGroups = 0xFFFF - -// NewPerfCounterClient creates a client using perf counters -func NewPerfCounterClient() (Client, error) { - // Initialize the cache - initCache := cpuUsageCoreNanoSecondsCache{0, 0} - return newClient(&perfCounterNodeStatsClient{ - cpuUsageCoreNanoSecondsCache: initCache, - }) -} - -// perfCounterNodeStatsClient is a client that provides Windows Stats via PerfCounters -type perfCounterNodeStatsClient struct { - nodeMetrics - mu sync.RWMutex // mu protects nodeMetrics - 
nodeInfo - // cpuUsageCoreNanoSecondsCache caches the cpu usage for nodes. - cpuUsageCoreNanoSecondsCache -} - -func (p *perfCounterNodeStatsClient) startMonitoring() error { - memory, err := getPhysicallyInstalledSystemMemoryBytes() - if err != nil { - return err - } - - osInfo, err := GetOSInfo() - if err != nil { - return err - } - - p.nodeInfo = nodeInfo{ - kernelVersion: osInfo.GetPatchVersion(), - osImageVersion: osInfo.ProductName, - memoryPhysicalCapacityBytes: memory, - startTime: time.Now(), - } - - cpuCounter, err := newPerfCounter(cpuQuery) - if err != nil { - return err - } - - memWorkingSetCounter, err := newPerfCounter(memoryPrivWorkingSetQuery) - if err != nil { - return err - } - - memCommittedBytesCounter, err := newPerfCounter(memoryCommittedBytesQuery) - if err != nil { - return err - } - - networkAdapterCounter, err := newNetworkCounters() - if err != nil { - return err - } - - go wait.Forever(func() { - p.collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter, networkAdapterCounter) - }, perfCounterUpdatePeriod) - - // Cache the CPU usage every defaultCachePeriod - go wait.Forever(func() { - newValue := p.nodeMetrics.cpuUsageCoreNanoSeconds - p.mu.Lock() - defer p.mu.Unlock() - p.cpuUsageCoreNanoSecondsCache = cpuUsageCoreNanoSecondsCache{ - previousValue: p.cpuUsageCoreNanoSecondsCache.latestValue, - latestValue: newValue, - } - }, defaultCachePeriod) - - return nil -} - -func (p *perfCounterNodeStatsClient) getMachineInfo() (*cadvisorapi.MachineInfo, error) { - hostname, err := os.Hostname() - if err != nil { - return nil, err - } - - systemUUID, err := getSystemUUID() - if err != nil { - return nil, err - } - - bootId, err := getBootID() - if err != nil { - return nil, err - } - - mi := &cadvisorapi.MachineInfo{ - NumCores: ProcessorCount(), - MemoryCapacity: p.nodeInfo.memoryPhysicalCapacityBytes, - MachineID: hostname, - SystemUUID: systemUUID, - BootID: bootId, - } - - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WindowsCPUAndMemoryAffinity) { - numOfPysicalCores, numOfSockets, topology, err := processorInfo(relationAll) - if err != nil { - return nil, err - } - - mi.NumPhysicalCores = numOfPysicalCores - mi.NumSockets = numOfSockets - mi.Topology = topology - } - - return mi, nil -} - -// ProcessorCount returns the number of logical processors on the system. -// runtime.NumCPU() will only return the information for a single Processor Group. -// Since a single group can only hold 64 logical processors, this -// means when there are more they will be divided into multiple groups. -// For the above reason, procGetActiveProcessorCount is used to get the -// cpu count for all processor groups of the windows node. 
-// more notes for this issue: -// same issue in moby: https://github.com/moby/moby/issues/38935#issuecomment-744638345 -// solution in hcsshim: https://github.com/microsoft/hcsshim/blob/master/internal/processorinfo/processor_count.go -func ProcessorCount() int { - if amount := getActiveProcessorCount(allProcessorGroups); amount != 0 { - return int(amount) - } - return runtime.NumCPU() -} - -func getActiveProcessorCount(groupNumber uint16) int { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) - return int(r0) -} - -func (p *perfCounterNodeStatsClient) getVersionInfo() (*cadvisorapi.VersionInfo, error) { - return &cadvisorapi.VersionInfo{ - KernelVersion: p.nodeInfo.kernelVersion, - ContainerOsVersion: p.nodeInfo.osImageVersion, - }, nil -} - -func (p *perfCounterNodeStatsClient) getNodeMetrics() (nodeMetrics, error) { - p.mu.RLock() - defer p.mu.RUnlock() - return p.nodeMetrics, nil -} - -func (p *perfCounterNodeStatsClient) getNodeInfo() nodeInfo { - return p.nodeInfo -} - -func (p *perfCounterNodeStatsClient) collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter perfCounter, networkAdapterCounter *networkCounter) { - cpuValue, err := cpuCounter.getData() - cpuCores := ProcessorCount() - if err != nil { - klog.ErrorS(err, "Unable to get cpu perf counter data") - return - } - - memWorkingSetValue, err := memWorkingSetCounter.getData() - if err != nil { - klog.ErrorS(err, "Unable to get memWorkingSet perf counter data") - return - } - - memCommittedBytesValue, err := memCommittedBytesCounter.getData() - if err != nil { - klog.ErrorS(err, "Unable to get memCommittedBytes perf counter data") - return - } - - networkAdapterStats, err := networkAdapterCounter.getData() - if err != nil { - klog.ErrorS(err, "Unable to get network adapter perf counter data") - return - } - - p.mu.Lock() - defer p.mu.Unlock() - p.nodeMetrics = nodeMetrics{ - cpuUsageCoreNanoSeconds: p.convertCPUValue(cpuCores, cpuValue), - cpuUsageNanoCores: p.getCPUUsageNanoCores(), - memoryPrivWorkingSetBytes: memWorkingSetValue, - memoryCommittedBytes: memCommittedBytesValue, - interfaceStats: networkAdapterStats, - timeStamp: time.Now(), - } -} - -func (p *perfCounterNodeStatsClient) convertCPUValue(cpuCores int, cpuValue uint64) uint64 { - // This converts perf counter data which is cpu percentage for all cores into nanoseconds. - // The formula is (cpuPercentage / 100.0) * #cores * 1e+9 (nano seconds). 
More info here: - // https://github.com/kubernetes/heapster/issues/650 - newValue := p.nodeMetrics.cpuUsageCoreNanoSeconds + uint64((float64(cpuValue)/100.0)*float64(cpuCores)*1e9) - return newValue -} - -func (p *perfCounterNodeStatsClient) getCPUUsageNanoCores() uint64 { - cachePeriodSeconds := uint64(defaultCachePeriod / time.Second) - perfCounterUpdatePeriodSeconds := uint64(perfCounterUpdatePeriod / time.Second) - cpuUsageNanoCores := ((p.cpuUsageCoreNanoSecondsCache.latestValue - p.cpuUsageCoreNanoSecondsCache.previousValue) * perfCounterUpdatePeriodSeconds) / cachePeriodSeconds - return cpuUsageNanoCores -} - -func getSystemUUID() (string, error) { - k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig`, registry.QUERY_VALUE) - if err != nil { - return "", errors.Wrap(err, "failed to open registry key HKLM\\SYSTEM\\HardwareConfig") - } - defer k.Close() - - uuid, _, err := k.GetStringValue("LastConfig") - if err != nil { - return "", errors.Wrap(err, "failed to read registry value LastConfig from key HKLM\\SYSTEM\\HardwareConfig") - } - - uuid = strings.Trim(uuid, "{") - uuid = strings.Trim(uuid, "}") - uuid = strings.ToUpper(uuid) - return uuid, nil -} - -func getPhysicallyInstalledSystemMemoryBytes() (uint64, error) { - // We use GlobalMemoryStatusEx instead of GetPhysicallyInstalledSystemMemory - // on Windows node for the following reasons: - // 1. GetPhysicallyInstalledSystemMemory retrieves the amount of physically - // installed RAM from the computer's SMBIOS firmware tables. - // https://msdn.microsoft.com/en-us/library/windows/desktop/cc300158(v=vs.85).aspx - // On some VM, it is unable to read data from SMBIOS and fails with ERROR_INVALID_DATA. - // 2. On Linux node, total physical memory is read from MemTotal in /proc/meminfo. - // GlobalMemoryStatusEx returns the amount of physical memory that is available - // for the operating system to use. The amount returned by GlobalMemoryStatusEx - // is closer in parity with Linux - // https://www.kernel.org/doc/Documentation/filesystems/proc.txt - var statex MemoryStatusEx - statex.Length = uint32(unsafe.Sizeof(statex)) - ret, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&statex))) - - if ret == 0 { - return 0, errors.New("unable to read physical memory") - } - - return statex.TotalPhys, nil -} - -func GetPerformanceInfo() (*PerformanceInformation, error) { - var pi PerformanceInformation - pi.cb = uint32(unsafe.Sizeof(pi)) - ret, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&pi)), uintptr(pi.cb)) - if ret == 0 { - return nil, errors.New("unable to read Windows performance information") - } - return &pi, nil -} - -func getBootID() (string, error) { - regKey, err := registry.OpenKey(registry.LOCAL_MACHINE, bootIdRegistry, registry.READ) - if err != nil { - return "", err - } - defer regKey.Close() - regValue, _, err := regKey.GetIntegerValue(bootIdKey) - if err != nil { - return "", err - } - return strconv.FormatUint(regValue, 10), nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go deleted file mode 100644 index 4b483d41d86..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go +++ /dev/null @@ -1,136 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
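The conversion in convertCPUValue above turns a sampled "% Processor Time" reading into accumulated CPU nanoseconds; worked through with made-up numbers:

package main

import "fmt"

func main() {
	// Made-up sample: the "% Processor Time" counter reports 25% across an
	// 8-core node for one perfCounterUpdatePeriod sample.
	cpuCores := 8
	cpuPercent := 25.0

	// (cpuPercentage / 100.0) * #cores * 1e9 nanoseconds, as in convertCPUValue.
	deltaNanos := uint64((cpuPercent / 100.0) * float64(cpuCores) * 1e9)
	fmt.Println(deltaNanos) // 2000000000, i.e. 2 core-seconds of CPU time
}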
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winstats - -import ( - "errors" - "fmt" - "time" - "unsafe" - - "github.com/JeffAshton/win_pdh" -) - -const ( - cpuQuery = "\\Processor(_Total)\\% Processor Time" - memoryPrivWorkingSetQuery = "\\Process(_Total)\\Working Set - Private" - memoryCommittedBytesQuery = "\\Memory\\Committed Bytes" - // Perf counters are updated 10 seconds. This is the same as the default cadvisor housekeeping interval - // set at https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/cadvisor/cadvisor_linux.go - perfCounterUpdatePeriod = 10 * time.Second - // defaultCachePeriod is the default cache period for each cpuUsage. - // This matches with the cadvisor setting and the time interval we use for containers. - // see https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/cadvisor/cadvisor_linux.go#L63 - defaultCachePeriod = 10 * time.Second -) - -type perfCounter interface { - getData() (uint64, error) - getDataList() (map[string]uint64, error) -} - -type perfCounterImpl struct { - queryHandle win_pdh.PDH_HQUERY - counterHandle win_pdh.PDH_HCOUNTER -} - -func newPerfCounter(counter string) (perfCounter, error) { - var queryHandle win_pdh.PDH_HQUERY - var counterHandle win_pdh.PDH_HCOUNTER - - ret := win_pdh.PdhOpenQuery(0, 0, &queryHandle) - if ret != win_pdh.ERROR_SUCCESS { - return nil, errors.New("unable to open query through DLL call") - } - - ret = win_pdh.PdhAddEnglishCounter(queryHandle, counter, 0, &counterHandle) - if ret != win_pdh.ERROR_SUCCESS { - return nil, fmt.Errorf("unable to add process counter: %s. Error code is %x", counter, ret) - } - - ret = win_pdh.PdhCollectQueryData(queryHandle) - if ret != win_pdh.ERROR_SUCCESS { - return nil, fmt.Errorf("unable to collect data from counter. Error code is %x", ret) - } - - return &perfCounterImpl{ - queryHandle: queryHandle, - counterHandle: counterHandle, - }, nil -} - -// getData is used for getting data without * in counter name. -func (p *perfCounterImpl) getData() (uint64, error) { - filledBuf, bufCount, err := p.getQueriedData() - if err != nil { - return 0, err - } - - var data uint64 = 0 - for i := 0; i < int(bufCount); i++ { - c := filledBuf[i] - data = uint64(c.FmtValue.DoubleValue) - } - - return data, nil -} - -// getDataList is used for getting data with * in counter name. -func (p *perfCounterImpl) getDataList() (map[string]uint64, error) { - filledBuf, bufCount, err := p.getQueriedData() - if err != nil { - return nil, err - } - - data := map[string]uint64{} - for i := 0; i < int(bufCount); i++ { - c := filledBuf[i] - value := uint64(c.FmtValue.DoubleValue) - name := win_pdh.UTF16PtrToString(c.SzName) - data[name] = value - } - - return data, nil -} - -// getQueriedData is used for getting data using the given query handle. -func (p *perfCounterImpl) getQueriedData() ([]win_pdh.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, uint32, error) { - ret := win_pdh.PdhCollectQueryData(p.queryHandle) - if ret != win_pdh.ERROR_SUCCESS { - return nil, 0, fmt.Errorf("unable to collect data from counter. 
Error code is %x", ret) - } - - var bufSize, bufCount uint32 - var size = uint32(unsafe.Sizeof(win_pdh.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{})) - var emptyBuf [1]win_pdh.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr. - - ret = win_pdh.PdhGetFormattedCounterArrayDouble(p.counterHandle, &bufSize, &bufCount, &emptyBuf[0]) - if ret != win_pdh.PDH_MORE_DATA { - return nil, 0, fmt.Errorf("unable to collect data from counter. Error code is %x", ret) - } - - filledBuf := make([]win_pdh.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size) - ret = win_pdh.PdhGetFormattedCounterArrayDouble(p.counterHandle, &bufSize, &bufCount, &filledBuf[0]) - if ret != win_pdh.ERROR_SUCCESS { - return nil, 0, fmt.Errorf("unable to collect data from counter. Error code is %x", ret) - } - - return filledBuf, bufCount, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go deleted file mode 100644 index 80e9442f8ce..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go +++ /dev/null @@ -1,86 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winstats - -import ( - "fmt" - "golang.org/x/sys/windows/registry" -) - -// OSInfo is a convenience class for retrieving Windows OS information -type OSInfo struct { - BuildNumber, ProductName string - MajorVersion, MinorVersion, UBR uint64 -} - -// GetOSInfo reads Windows version information from the registry -func GetOSInfo() (*OSInfo, error) { - // for log detail - var keyPrefix string = `regedit:LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion` - - k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { - return nil, fmt.Errorf("getOSInfo, open Windows %s failed: %w", keyPrefix, err) - } - defer k.Close() - - buildNumber, _, err := k.GetStringValue("CurrentBuildNumber") - if err != nil { - return nil, fmt.Errorf("getOSInfo, get %s\\CurrentBuildNumber failed: %w", keyPrefix, err) - } - - majorVersionNumber, _, err := k.GetIntegerValue("CurrentMajorVersionNumber") - if err != nil { - return nil, fmt.Errorf("getOSInfo, get %s\\CurrentMajorVersionNumber failed: %w", keyPrefix, err) - } - - minorVersionNumber, _, err := k.GetIntegerValue("CurrentMinorVersionNumber") - if err != nil { - return nil, fmt.Errorf("getOSInfo, get %s\\CurrentMinorVersionNumber failed: %w", keyPrefix, err) - } - - revision, _, err := k.GetIntegerValue("UBR") - if err != nil { - return nil, fmt.Errorf("getOSInfo, get %s\\UBR failed: %w", keyPrefix, err) - } - - productName, _, err := k.GetStringValue("ProductName") - if err != nil { - return nil, fmt.Errorf("getOSInfo, get %s\\ProductName failed: %w", keyPrefix, err) - } - - return &OSInfo{ - BuildNumber: buildNumber, - ProductName: productName, - MajorVersion: majorVersionNumber, - MinorVersion: minorVersionNumber, - UBR: revision, - }, nil -} - -// 
GetPatchVersion returns full OS version with patch -func (o *OSInfo) GetPatchVersion() string { - return fmt.Sprintf("%d.%d.%s.%d", o.MajorVersion, o.MinorVersion, o.BuildNumber, o.UBR) -} - -// GetBuild returns OS version upto build number -func (o *OSInfo) GetBuild() string { - return fmt.Sprintf("%d.%d.%s", o.MajorVersion, o.MinorVersion, o.BuildNumber) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/winstats.go b/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/winstats.go deleted file mode 100644 index 84fe6212ea5..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/winstats.go +++ /dev/null @@ -1,188 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package winstats provides a client to get node and pod level stats on windows -package winstats - -import ( - "syscall" - "time" - "unsafe" - - cadvisorapi "github.com/google/cadvisor/info/v1" - cadvisorapiv2 "github.com/google/cadvisor/info/v2" -) - -var ( - procGetDiskFreeSpaceEx = modkernel32.NewProc("GetDiskFreeSpaceExW") -) - -// Client is an interface that is used to get stats information. -type Client interface { - WinContainerInfos() (map[string]cadvisorapiv2.ContainerInfo, error) - WinMachineInfo() (*cadvisorapi.MachineInfo, error) - WinVersionInfo() (*cadvisorapi.VersionInfo, error) - GetDirFsInfo(path string) (cadvisorapiv2.FsInfo, error) -} - -// StatsClient is a client that implements the Client interface -type StatsClient struct { - client winNodeStatsClient -} - -type winNodeStatsClient interface { - startMonitoring() error - getNodeMetrics() (nodeMetrics, error) - getNodeInfo() nodeInfo - getMachineInfo() (*cadvisorapi.MachineInfo, error) - getVersionInfo() (*cadvisorapi.VersionInfo, error) -} - -type nodeMetrics struct { - cpuUsageCoreNanoSeconds uint64 - cpuUsageNanoCores uint64 - memoryPrivWorkingSetBytes uint64 - memoryCommittedBytes uint64 - timeStamp time.Time - interfaceStats []cadvisorapi.InterfaceStats -} - -type nodeInfo struct { - memoryPhysicalCapacityBytes uint64 - kernelVersion string - osImageVersion string - // startTime is the time when the node was started - startTime time.Time -} - -type cpuUsageCoreNanoSecondsCache struct { - latestValue uint64 - previousValue uint64 -} - -// newClient constructs a Client. -func newClient(statsNodeClient winNodeStatsClient) (Client, error) { - statsClient := new(StatsClient) - statsClient.client = statsNodeClient - - err := statsClient.client.startMonitoring() - if err != nil { - return nil, err - } - - return statsClient, nil -} - -// WinContainerInfos returns a map of container infos. The map contains node and -// pod level stats. Analogous to cadvisor GetContainerInfoV2 method. 
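GetPatchVersion and GetBuild earlier in this hunk are plain formatting of the registry values; a sketch with a local stand-in struct and made-up version numbers:

package main

import "fmt"

// osInfo mirrors the deleted OSInfo fields; the values below are made up.
type osInfo struct {
	BuildNumber, ProductName        string
	MajorVersion, MinorVersion, UBR uint64
}

func main() {
	o := osInfo{BuildNumber: "20348", ProductName: "Windows Server 2022", MajorVersion: 10, MinorVersion: 0, UBR: 2402}
	fmt.Printf("patch version: %d.%d.%s.%d\n", o.MajorVersion, o.MinorVersion, o.BuildNumber, o.UBR) // 10.0.20348.2402
	fmt.Printf("build:         %d.%d.%s\n", o.MajorVersion, o.MinorVersion, o.BuildNumber)           // 10.0.20348
}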
-func (c *StatsClient) WinContainerInfos() (map[string]cadvisorapiv2.ContainerInfo, error) { - infos := make(map[string]cadvisorapiv2.ContainerInfo) - rootContainerInfo, err := c.createRootContainerInfo() - if err != nil { - return nil, err - } - - infos["/"] = *rootContainerInfo - - return infos, nil -} - -// WinMachineInfo returns a cadvisorapi.MachineInfo with details about the -// node machine. Analogous to cadvisor MachineInfo method. -func (c *StatsClient) WinMachineInfo() (*cadvisorapi.MachineInfo, error) { - return c.client.getMachineInfo() -} - -// WinVersionInfo returns a cadvisorapi.VersionInfo with version info of -// the kernel and docker runtime. Analogous to cadvisor VersionInfo method. -func (c *StatsClient) WinVersionInfo() (*cadvisorapi.VersionInfo, error) { - return c.client.getVersionInfo() -} - -func (c *StatsClient) createRootContainerInfo() (*cadvisorapiv2.ContainerInfo, error) { - nodeMetrics, err := c.client.getNodeMetrics() - if err != nil { - return nil, err - } - - var stats []*cadvisorapiv2.ContainerStats - stats = append(stats, &cadvisorapiv2.ContainerStats{ - Timestamp: nodeMetrics.timeStamp, - Cpu: &cadvisorapi.CpuStats{ - Usage: cadvisorapi.CpuUsage{ - Total: nodeMetrics.cpuUsageCoreNanoSeconds, - }, - }, - CpuInst: &cadvisorapiv2.CpuInstStats{ - Usage: cadvisorapiv2.CpuInstUsage{ - Total: nodeMetrics.cpuUsageNanoCores, - }, - }, - Memory: &cadvisorapi.MemoryStats{ - WorkingSet: nodeMetrics.memoryPrivWorkingSetBytes, - Usage: nodeMetrics.memoryCommittedBytes, - }, - Network: &cadvisorapiv2.NetworkStats{ - Interfaces: nodeMetrics.interfaceStats, - }, - }) - - nodeInfo := c.client.getNodeInfo() - rootInfo := cadvisorapiv2.ContainerInfo{ - Spec: cadvisorapiv2.ContainerSpec{ - CreationTime: nodeInfo.startTime, - HasCpu: true, - HasMemory: true, - HasNetwork: true, - Memory: cadvisorapiv2.MemorySpec{ - Limit: nodeInfo.memoryPhysicalCapacityBytes, - }, - }, - Stats: stats, - } - - return &rootInfo, nil -} - -// GetDirFsInfo returns filesystem capacity and usage information. -func (c *StatsClient) GetDirFsInfo(path string) (cadvisorapiv2.FsInfo, error) { - var freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes int64 - var err error - - ret, _, err := syscall.Syscall6( - procGetDiskFreeSpaceEx.Addr(), - 4, - uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), - uintptr(unsafe.Pointer(&freeBytesAvailable)), - uintptr(unsafe.Pointer(&totalNumberOfBytes)), - uintptr(unsafe.Pointer(&totalNumberOfFreeBytes)), - 0, - 0, - ) - if ret == 0 { - return cadvisorapiv2.FsInfo{}, err - } - - return cadvisorapiv2.FsInfo{ - Timestamp: time.Now(), - Capacity: uint64(totalNumberOfBytes), - Available: uint64(freeBytesAvailable), - Usage: uint64(totalNumberOfBytes - freeBytesAvailable), - }, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/dialer_others.go b/e2e/vendor/k8s.io/kubernetes/pkg/probe/dialer_others.go deleted file mode 100644 index 7b02daff340..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/probe/dialer_others.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build !windows -// +build !windows - -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package probe
-
-import (
-	"net"
-	"syscall"
-)
-
-// ProbeDialer returns a dialer optimized for probes to avoid lingering sockets on TIME-WAIT state.
-// The dialer reduces the TIME-WAIT period to 1 second instead of the OS default of 60 seconds.
-// Using 1 second instead of 0 because SO_LINGER socket option to 0 causes pending data to be
-// discarded and the connection to be aborted with an RST rather than for the pending data to be
-// transmitted and the connection closed cleanly with a FIN.
-// Ref: https://issues.k8s.io/89898
-func ProbeDialer() *net.Dialer {
-	dialer := &net.Dialer{
-		Control: func(network, address string, c syscall.RawConn) error {
-			return c.Control(func(fd uintptr) {
-				syscall.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &syscall.Linger{Onoff: 1, Linger: 1})
-			})
-		},
-	}
-	return dialer
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/dialer_windows.go b/e2e/vendor/k8s.io/kubernetes/pkg/probe/dialer_windows.go
deleted file mode 100644
index ab0d020478c..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/probe/dialer_windows.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//go:build windows
-// +build windows
-
-/*
-Copyright 2023 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package probe
-
-import (
-	"net"
-	"syscall"
-)
-
-// ProbeDialer returns a dialer optimized for probes to avoid lingering sockets on TIME-WAIT state.
-// The dialer reduces the TIME-WAIT period to 1 second instead of the OS default of 60 seconds.
-// Using 1 second instead of 0 because SO_LINGER socket option to 0 causes pending data to be
-// discarded and the connection to be aborted with an RST rather than for the pending data to be
-// transmitted and the connection closed cleanly with a FIN.
-// Ref: https://issues.k8s.io/89898
-func ProbeDialer() *net.Dialer {
-	dialer := &net.Dialer{
-		Control: func(network, address string, c syscall.RawConn) error {
-			return c.Control(func(fd uintptr) {
-				syscall.SetsockoptLinger(syscall.Handle(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &syscall.Linger{Onoff: 1, Linger: 1})
-			})
-		},
-	}
-	return dialer
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/probe/doc.go
deleted file mode 100644
index 89c70b2c16a..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/probe/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package probe contains utilities for health probing, as well as health status information. -package probe diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/http/http.go b/e2e/vendor/k8s.io/kubernetes/pkg/probe/http/http.go deleted file mode 100644 index 20e33da8ed4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/probe/http/http.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package http - -import ( - "crypto/tls" - "errors" - "fmt" - "net/http" - "time" - - utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/kubernetes/pkg/probe" - - "k8s.io/klog/v2" - utilio "k8s.io/utils/io" -) - -const ( - maxRespBodyLength = 10 * 1 << 10 // 10KB -) - -// New creates Prober that will skip TLS verification while probing. -// followNonLocalRedirects configures whether the prober should follow redirects to a different hostname. -// If disabled, redirects to other hosts will trigger a warning result. -func New(followNonLocalRedirects bool) Prober { - tlsConfig := &tls.Config{InsecureSkipVerify: true} - return NewWithTLSConfig(tlsConfig, followNonLocalRedirects) -} - -// NewWithTLSConfig takes tls config as parameter. -// followNonLocalRedirects configures whether the prober should follow redirects to a different hostname. -// If disabled, redirects to other hosts will trigger a warning result. -func NewWithTLSConfig(config *tls.Config, followNonLocalRedirects bool) Prober { - // We do not want the probe use node's local proxy set. - transport := utilnet.SetTransportDefaults( - &http.Transport{ - TLSClientConfig: config, - DisableKeepAlives: true, - Proxy: http.ProxyURL(nil), - DisableCompression: true, // removes Accept-Encoding header - // DialContext creates unencrypted TCP connections - // and is also used by the transport for HTTPS connection - DialContext: probe.ProbeDialer().DialContext, - }) - - return httpProber{transport, followNonLocalRedirects} -} - -// Prober is an interface that defines the Probe function for doing HTTP readiness/liveness checks. -type Prober interface { - Probe(req *http.Request, timeout time.Duration) (probe.Result, string, error) -} - -type httpProber struct { - transport *http.Transport - followNonLocalRedirects bool -} - -// Probe returns a ProbeRunner capable of running an HTTP check. 
-func (pr httpProber) Probe(req *http.Request, timeout time.Duration) (probe.Result, string, error) { - client := &http.Client{ - Timeout: timeout, - Transport: pr.transport, - CheckRedirect: RedirectChecker(pr.followNonLocalRedirects), - } - return DoHTTPProbe(req, client) -} - -// GetHTTPInterface is an interface for making HTTP requests, that returns a response and error. -type GetHTTPInterface interface { - Do(req *http.Request) (*http.Response, error) -} - -// DoHTTPProbe checks if a GET request to the url succeeds. -// If the HTTP response code is successful (i.e. 400 > code >= 200), it returns Success. -// If the HTTP response code is unsuccessful or HTTP communication fails, it returns Failure. -// This is exported because some other packages may want to do direct HTTP probes. -func DoHTTPProbe(req *http.Request, client GetHTTPInterface) (probe.Result, string, error) { - url := req.URL - headers := req.Header - res, err := client.Do(req) - if err != nil { - // Convert errors into failures to catch timeouts. - return probe.Failure, err.Error(), nil - } - defer res.Body.Close() - b, err := utilio.ReadAtMost(res.Body, maxRespBodyLength) - if err != nil { - if err == utilio.ErrLimitReached { - klog.V(4).Infof("Non fatal body truncation for %s, Response: %v", url.String(), *res) - } else { - return probe.Failure, "", err - } - } - body := string(b) - if res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest { - if res.StatusCode >= http.StatusMultipleChoices { // Redirect - klog.V(4).Infof("Probe terminated redirects for %s, Response: %v", url.String(), *res) - return probe.Warning, fmt.Sprintf("Probe terminated redirects, Response body: %v", body), nil - } - klog.V(4).Infof("Probe succeeded for %s, Response: %v", url.String(), *res) - return probe.Success, body, nil - } - klog.V(4).Infof("Probe failed for %s with request headers %v, response body: %v", url.String(), headers, body) - // Note: Until https://issue.k8s.io/99425 is addressed, this user-facing failure message must not contain the response body. - failureMsg := fmt.Sprintf("HTTP probe failed with statuscode: %d", res.StatusCode) - return probe.Failure, failureMsg, nil -} - -// RedirectChecker returns a function that can be used to check HTTP redirects. -func RedirectChecker(followNonLocalRedirects bool) func(*http.Request, []*http.Request) error { - if followNonLocalRedirects { - return nil // Use the default http client checker. - } - - return func(req *http.Request, via []*http.Request) error { - if req.URL.Hostname() != via[0].URL.Hostname() { - return http.ErrUseLastResponse - } - // Default behavior: stop after 10 redirects. - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - return nil - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/http/request.go b/e2e/vendor/k8s.io/kubernetes/pkg/probe/http/request.go deleted file mode 100644 index fb7f818b249..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/probe/http/request.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package http - -import ( - "fmt" - "net" - "net/http" - "net/url" - "strconv" - "strings" - - v1 "k8s.io/api/core/v1" - "k8s.io/component-base/version" - "k8s.io/kubernetes/pkg/probe" -) - -// NewProbeRequest returns an http.Request suitable for use as a request for a -// probe. -func NewProbeRequest(url *url.URL, headers http.Header) (*http.Request, error) { - return newProbeRequest(url, headers, "probe") -} - -// NewRequestForHTTPGetAction returns an http.Request derived from httpGet. -// When httpGet.Host is empty, podIP will be used instead. -func NewRequestForHTTPGetAction(httpGet *v1.HTTPGetAction, container *v1.Container, podIP string, userAgentFragment string) (*http.Request, error) { - scheme := strings.ToLower(string(httpGet.Scheme)) - if scheme == "" { - scheme = "http" - } - - host := httpGet.Host - if host == "" { - host = podIP - } - - port, err := probe.ResolveContainerPort(httpGet.Port, container) - if err != nil { - return nil, err - } - - path := httpGet.Path - url := formatURL(scheme, host, port, path) - headers := v1HeaderToHTTPHeader(httpGet.HTTPHeaders) - - return newProbeRequest(url, headers, userAgentFragment) -} - -func newProbeRequest(url *url.URL, headers http.Header, userAgentFragment string) (*http.Request, error) { - req, err := http.NewRequest("GET", url.String(), nil) - if err != nil { - return nil, err - } - - if headers == nil { - headers = http.Header{} - } - if _, ok := headers["User-Agent"]; !ok { - // User-Agent header was not defined, set it - headers.Set("User-Agent", userAgent(userAgentFragment)) - } - if _, ok := headers["Accept"]; !ok { - // Accept header was not defined. accept all - headers.Set("Accept", "*/*") - } else if headers.Get("Accept") == "" { - // Accept header was overridden but is empty. removing - headers.Del("Accept") - } - req.Header = headers - req.Host = headers.Get("Host") - - return req, nil -} - -func userAgent(purpose string) string { - v := version.Get() - return fmt.Sprintf("kube-%s/%s.%s", purpose, v.Major, v.Minor) -} - -// formatURL formats a URL from args. For testability. -func formatURL(scheme string, host string, port int, path string) *url.URL { - u, err := url.Parse(path) - // Something is busted with the path, but it's too late to reject it. Pass it along as is. - // - // This construction of a URL may be wrong in some cases, but it preserves - // legacy prober behavior. - if err != nil { - u = &url.URL{ - Path: path, - } - } - u.Scheme = scheme - u.Host = net.JoinHostPort(host, strconv.Itoa(port)) - return u -} - -// v1HeaderToHTTPHeader takes a list of HTTPHeader string pairs -// and returns a populated string->[]string http.Header map. -func v1HeaderToHTTPHeader(headerList []v1.HTTPHeader) http.Header { - headers := make(http.Header) - for _, header := range headerList { - headers.Add(header.Name, header.Value) - } - return headers -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/probe.go b/e2e/vendor/k8s.io/kubernetes/pkg/probe/probe.go deleted file mode 100644 index c47b6c2fef2..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/probe/probe.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package probe - -// Result is a string used to handle the results for probing container readiness/liveness -type Result string - -const ( - // Success Result - Success Result = "success" - // Warning Result. Logically success, but with additional debugging information attached. - Warning Result = "warning" - // Failure Result - Failure Result = "failure" - // Unknown Result - Unknown Result = "unknown" -) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/util.go b/e2e/vendor/k8s.io/kubernetes/pkg/probe/util.go deleted file mode 100644 index cb251287450..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/probe/util.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package probe - -import ( - "fmt" - "strconv" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func ResolveContainerPort(param intstr.IntOrString, container *v1.Container) (int, error) { - port := -1 - var err error - switch param.Type { - case intstr.Int: - port = param.IntValue() - case intstr.String: - if port, err = findPortByName(container, param.StrVal); err != nil { - // Last ditch effort - maybe it was an int stored as string? - if port, err = strconv.Atoi(param.StrVal); err != nil { - return port, err - } - } - default: - return port, fmt.Errorf("intOrString had no kind: %+v", param) - } - if port > 0 && port < 65536 { - return port, nil - } - return port, fmt.Errorf("invalid port number: %v", port) -} - -// findPortByName is a helper function to look up a port in a container by name. 
-func findPortByName(container *v1.Container, portName string) (int, error) { - for _, port := range container.Ports { - if port.Name == portName { - return int(port.ContainerPort), nil - } - } - return 0, fmt.Errorf("port %s not found", portName) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/OWNERS deleted file mode 100644 index 19c02ee0b10..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - sig-scheduling-maintainers -reviewers: - - sig-scheduling -labels: - - sig/scheduling diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/OWNERS deleted file mode 100644 index 3023c572ed4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/OWNERS +++ /dev/null @@ -1,11 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - api-approvers -reviewers: - - api-reviewers - - sig-scheduling-api-reviewers - - sig-scheduling-api-approvers -labels: - - kind/api-change - - sig/scheduling diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go deleted file mode 100644 index 457556e101b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "kubescheduler.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeSchedulerConfiguration{}, - &DefaultPreemptionArgs{}, - &InterPodAffinityArgs{}, - &NodeResourcesFitArgs{}, - &PodTopologySpreadArgs{}, - &VolumeBindingArgs{}, - &NodeResourcesBalancedAllocationArgs{}, - &NodeAffinityArgs{}, - ) - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go deleted file mode 100644 index 366f776bf2d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheme - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - config "k8s.io/kubernetes/pkg/scheduler/apis/config" - configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1" -) - -var ( - // Scheme is the runtime.Scheme to which all kubescheduler api types are registered. - Scheme = runtime.NewScheme() - - // Codecs provides access to encoding and decoding for the scheme. - Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict) -) - -func init() { - AddToScheme(Scheme) -} - -// AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api. -func AddToScheme(scheme *runtime.Scheme) { - utilruntime.Must(config.AddToScheme(scheme)) - utilruntime.Must(configv1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority( - configv1.SchemeGroupVersion, - )) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go deleted file mode 100644 index b0c94a18eef..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "math" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - componentbaseconfig "k8s.io/component-base/config" -) - -const ( - // DefaultKubeSchedulerPort is the default port for the scheduler status server. - // May be overridden by a flag at startup. - DefaultKubeSchedulerPort = 10259 -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeSchedulerConfiguration configures a scheduler -type KubeSchedulerConfiguration struct { - // TypeMeta contains the API version and kind. In kube-scheduler, after - // conversion from the versioned KubeSchedulerConfiguration type to this - // internal type, we set the APIVersion field to the scheme group/version of - // the type we converted from. This is done in cmd/kube-scheduler in two - // places: (1) when loading config from a file, (2) generating the default - // config. Based on the versioned type set in this field, we make decisions; - // for example (1) during validation to check for usage of removed plugins, - // (2) writing config to a file, (3) initialising the scheduler. 
-	metav1.TypeMeta
-
-	// Parallelism defines the amount of parallelism in algorithms for scheduling Pods. Must be greater than 0. Defaults to 16.
-	Parallelism int32
-
-	// LeaderElection defines the configuration of leader election client.
-	LeaderElection componentbaseconfig.LeaderElectionConfiguration
-
-	// ClientConnection specifies the kubeconfig file and client connection
-	// settings for the proxy server to use when communicating with the apiserver.
-	ClientConnection componentbaseconfig.ClientConnectionConfiguration
-
-	// DebuggingConfiguration holds configuration for Debugging related features
-	// TODO: We might want to make this a substruct like Debugging componentbaseconfig.DebuggingConfiguration
-	componentbaseconfig.DebuggingConfiguration
-
-	// PercentageOfNodesToScore is the percentage of all nodes that once found feasible
-	// for running a pod, the scheduler stops its search for more feasible nodes in
-	// the cluster. This helps improve scheduler's performance. Scheduler always tries to find
-	// at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is.
-	// Example: if the cluster size is 500 nodes and the value of this flag is 30,
-	// then scheduler stops finding further feasible nodes once it finds 150 feasible ones.
-	// When the value is 0, default percentage (5%--50% based on the size of the cluster) of the
-	// nodes will be scored. It is overridden by profile level PercentageOfNodesToScore.
-	PercentageOfNodesToScore *int32
-
-	// PodInitialBackoffSeconds is the initial backoff for unschedulable pods.
-	// If specified, it must be greater than 0. If this value is null, the default value (1s)
-	// will be used.
-	PodInitialBackoffSeconds int64
-
-	// PodMaxBackoffSeconds is the max backoff for unschedulable pods.
-	// If specified, it must be greater than or equal to podInitialBackoffSeconds. If this value is null,
-	// the default value (10s) will be used.
-	PodMaxBackoffSeconds int64
-
-	// Profiles are scheduling profiles that kube-scheduler supports. Pods can
-	// choose to be scheduled under a particular profile by setting its associated
-	// scheduler name. Pods that don't specify any scheduler name are scheduled
-	// with the "default-scheduler" profile, if present here.
-	Profiles []KubeSchedulerProfile
-
-	// Extenders are the list of scheduler extenders, each holding the values of how to communicate
-	// with the extender. These extenders are shared by all scheduler profiles.
-	Extenders []Extender
-
-	// DelayCacheUntilActive specifies when to start caching. If this is true and leader election is enabled,
-	// the scheduler will wait to fill informer caches until it is the leader. Doing so will have slower
-	// failover with the benefit of lower memory overhead while waiting to become leader.
-	// Defaults to false.
-	DelayCacheUntilActive bool
-}
-
-// KubeSchedulerProfile is a scheduling profile.
-type KubeSchedulerProfile struct {
-	// SchedulerName is the name of the scheduler associated to this profile.
-	// If SchedulerName matches with the pod's "spec.schedulerName", then the pod
-	// is scheduled with this profile.
-	SchedulerName string
-
-	// PercentageOfNodesToScore is the percentage of all nodes that once found feasible
-	// for running a pod, the scheduler stops its search for more feasible nodes in
-	// the cluster. This helps improve scheduler's performance. Scheduler always tries to find
-	// at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is.
-	// Example: if the cluster size is 500 nodes and the value of this flag is 30,
-	// then scheduler stops finding further feasible nodes once it finds 150 feasible ones.
-	// When the value is 0, default percentage (5%--50% based on the size of the cluster) of the
-	// nodes will be scored. It will override global PercentageOfNodesToScore. If it is empty,
-	// global PercentageOfNodesToScore will be used.
-	PercentageOfNodesToScore *int32
-
-	// Plugins specify the set of plugins that should be enabled or disabled.
-	// Enabled plugins are the ones that should be enabled in addition to the
-	// default plugins. Disabled plugins are any of the default plugins that
-	// should be disabled.
-	// When no enabled or disabled plugin is specified for an extension point,
-	// default plugins for that extension point will be used if there are any.
-	// If a QueueSort plugin is specified, the same QueueSort Plugin and
-	// PluginConfig must be specified for all profiles.
-	Plugins *Plugins
-
-	// PluginConfig is an optional set of custom plugin arguments for each plugin.
-	// Omitting config args for a plugin is equivalent to using the default config
-	// for that plugin.
-	PluginConfig []PluginConfig
-}
-
-// Plugins include multiple extension points. When specified, the list of plugins for
-// a particular extension point are the only ones enabled. If an extension point is
-// omitted from the config, then the default set of plugins is used for that extension point.
-// Enabled plugins are called in the order specified here, after default plugins. If they need to
-// be invoked before default plugins, default plugins must be disabled and re-enabled here in the desired order.
-type Plugins struct {
-	// PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue.
-	PreEnqueue PluginSet
-
-	// QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue.
-	QueueSort PluginSet
-
-	// PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework.
-	PreFilter PluginSet
-
-	// Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod.
-	Filter PluginSet
-
-	// PostFilter is a list of plugins that are invoked after the filtering phase, but only when no feasible nodes were found for the pod.
-	PostFilter PluginSet
-
-	// PreScore is a list of plugins that are invoked before scoring.
-	PreScore PluginSet
-
-	// Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase.
-	Score PluginSet
-
-	// Reserve is a list of plugins invoked when reserving/unreserving resources
-	// after a node is assigned to run the pod.
-	Reserve PluginSet
-
-	// Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod.
-	Permit PluginSet
-
-	// PreBind is a list of plugins that should be invoked before a pod is bound.
-	PreBind PluginSet
-
-	// Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework.
-	// The scheduler calls these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success.
-	Bind PluginSet
-
-	// PostBind is a list of plugins that should be invoked after a pod is successfully bound.
- PostBind PluginSet - - // MultiPoint is a simplified config field for enabling plugins for all valid extension points - MultiPoint PluginSet -} - -// PluginSet specifies enabled and disabled plugins for an extension point. -// If an array is empty, missing, or nil, default plugins at that extension point will be used. -type PluginSet struct { - // Enabled specifies plugins that should be enabled in addition to default plugins. - // These are called after default plugins and in the same order specified here. - Enabled []Plugin - // Disabled specifies default plugins that should be disabled. - // When all default plugins need to be disabled, an array containing only one "*" should be provided. - Disabled []Plugin -} - -// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. -type Plugin struct { - // Name defines the name of plugin - Name string - // Weight defines the weight of plugin, only used for Score plugins. - Weight int32 -} - -// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. -// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. -// It is up to the plugin to process these Args. -type PluginConfig struct { - // Name defines the name of plugin being configured - Name string - // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. - Args runtime.Object -} - -/* - * NOTE: The following variables and methods are intentionally left out of the staging mirror. - */ -const ( - // DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes - // that once found feasible, the scheduler stops looking for more nodes. - // A value of 0 means adaptive, meaning the scheduler figures out a proper default. - DefaultPercentageOfNodesToScore = 0 - - // MaxCustomPriorityScore is the max score UtilizationShapePoint expects. - MaxCustomPriorityScore int64 = 10 - - // MaxTotalScore is the maximum total score. - MaxTotalScore int64 = math.MaxInt64 - - // MaxWeight defines the max weight value allowed for custom PriorityPolicy - MaxWeight = MaxTotalScore / MaxCustomPriorityScore -) - -// Names returns the list of enabled plugin names. -func (p *Plugins) Names() []string { - if p == nil { - return nil - } - extensions := []PluginSet{ - p.PreEnqueue, - p.PreFilter, - p.Filter, - p.PostFilter, - p.Reserve, - p.PreScore, - p.Score, - p.PreBind, - p.Bind, - p.PostBind, - p.Permit, - p.QueueSort, - } - n := sets.New[string]() - for _, e := range extensions { - for _, pg := range e.Enabled { - n.Insert(pg.Name) - } - } - return sets.List(n) -} - -// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, -// it is assumed that the extender chose not to provide that extension. -type Extender struct { - // URLPrefix at which the extender is available - URLPrefix string - // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. - FilterVerb string - // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. - PreemptVerb string - // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. - PrioritizeVerb string - // The numeric multiplier for the node scores that the prioritize call generates. 
- // The weight should be a positive integer - Weight int64 - // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. - // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender - // can implement this function. - BindVerb string - // EnableHTTPS specifies whether https should be used to communicate with the extender - EnableHTTPS bool - // TLSConfig specifies the transport layer security config - TLSConfig *ExtenderTLSConfig - // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize - // timeout is ignored, k8s/other extenders priorities are used to select the node. - HTTPTimeout metav1.Duration - // NodeCacheCapable specifies that the extender is capable of caching node information, - // so the scheduler should only send minimal information about the eligible nodes - // assuming that the extender already cached full details of all nodes in the cluster - NodeCacheCapable bool - // ManagedResources is a list of extended resources that are managed by - // this extender. - // - A pod will be sent to the extender on the Filter, Prioritize and Bind - // (if the extender is the binder) phases iff the pod requests at least - // one of the extended resources in this list. If empty or unspecified, - // all pods will be sent to this extender. - // - If IgnoredByScheduler is set to true for a resource, kube-scheduler - // will skip checking the resource in predicates. - // +optional - ManagedResources []ExtenderManagedResource - // Ignorable specifies if the extender is ignorable, i.e. scheduling should not - // fail when the extender returns an error or is not reachable. - Ignorable bool -} - -// ExtenderManagedResource describes the arguments of extended resources -// managed by an extender. -type ExtenderManagedResource struct { - // Name is the extended resource name. - Name string - // IgnoredByScheduler indicates whether kube-scheduler should ignore this - // resource when applying predicates. - IgnoredByScheduler bool -} - -// ExtenderTLSConfig contains settings to enable TLS with extender -type ExtenderTLSConfig struct { - // Server should be accessed without verifying the TLS certificate. For testing only. - Insecure bool - // ServerName is passed to the server for SNI and is used in the client to check server - // certificates against. If ServerName is empty, the hostname used to contact the - // server is used. - ServerName string - - // Server requires TLS client certificate authentication - CertFile string - // Server requires TLS client certificate authentication - KeyFile string - // Trusted root certificates for server - CAFile string - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte `datapolicy:"security-key"` - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). 
- // CAData takes precedence over CAFile - CAData []byte -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go deleted file mode 100644 index a3db9a9049b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DefaultPreemptionArgs holds arguments used to configure the -// DefaultPreemption plugin. -type DefaultPreemptionArgs struct { - metav1.TypeMeta - - // MinCandidateNodesPercentage is the minimum number of candidates to - // shortlist when dry running preemption as a percentage of number of nodes. - // Must be in the range [0, 100]. Defaults to 10% of the cluster size if - // unspecified. - MinCandidateNodesPercentage int32 - // MinCandidateNodesAbsolute is the absolute minimum number of candidates to - // shortlist. The likely number of candidates enumerated for dry running - // preemption is given by the formula: - // numCandidates = max(numNodes * minCandidateNodesPercentage, minCandidateNodesAbsolute) - // We say "likely" because there are other factors such as PDB violations - // that play a role in the number of candidates shortlisted. Must be at least - // 0 nodes. Defaults to 100 nodes if unspecified. - MinCandidateNodesAbsolute int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. -type InterPodAffinityArgs struct { - metav1.TypeMeta - - // HardPodAffinityWeight is the scoring weight for existing pods with a - // matching hard affinity to the incoming pod. - HardPodAffinityWeight int32 - - // IgnorePreferredTermsOfExistingPods configures the scheduler to ignore existing pods' preferred affinity - // rules when scoring candidate nodes, unless the incoming pod has inter-pod affinities. - IgnorePreferredTermsOfExistingPods bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. -type NodeResourcesFitArgs struct { - metav1.TypeMeta - - // IgnoredResources is the list of resources that NodeResources fit filter - // should ignore. - IgnoredResources []string - // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. - // e.g. if group is ["example.com"], it will ignore all resource names that begin - // with "example.com", such as "example.com/aaa" and "example.com/bbb". - // A resource group name can't contain '/'. - IgnoredResourceGroups []string - - // ScoringStrategy selects the node resource scoring strategy. 
-	ScoringStrategy *ScoringStrategy
-}
-
-// PodTopologySpreadConstraintsDefaulting defines how to set default constraints
-// for the PodTopologySpread plugin.
-type PodTopologySpreadConstraintsDefaulting string
-
-const (
-	// SystemDefaulting instructs to use the kubernetes defined default.
-	SystemDefaulting PodTopologySpreadConstraintsDefaulting = "System"
-	// ListDefaulting instructs to use the config provided default.
-	ListDefaulting PodTopologySpreadConstraintsDefaulting = "List"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin.
-type PodTopologySpreadArgs struct {
-	metav1.TypeMeta
-
-	// DefaultConstraints defines topology spread constraints to be applied to
-	// Pods that don't define any in `pod.spec.topologySpreadConstraints`.
-	// `.defaultConstraints[*].labelSelectors` must be empty, as they are
-	// deduced from the Pod's membership to Services, ReplicationControllers,
-	// ReplicaSets or StatefulSets.
-	// When not empty, .defaultingType must be "List".
-	DefaultConstraints []v1.TopologySpreadConstraint
-
-	// DefaultingType determines how .defaultConstraints are deduced. Can be one
-	// of "System" or "List".
-	//
-	// - "System": Use kubernetes defined constraints that spread Pods among
-	//   Nodes and Zones.
-	// - "List": Use constraints defined in .defaultConstraints.
-	//
-	// Defaults to "System".
-	// +optional
-	DefaultingType PodTopologySpreadConstraintsDefaulting
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin.
-type NodeResourcesBalancedAllocationArgs struct {
-	metav1.TypeMeta
-
-	// Resources to be considered when scoring.
-	// The default resource set includes "cpu" and "memory"; the only valid weight is 1.
-	Resources []ResourceSpec
-}
-
-// UtilizationShapePoint represents a single point of a priority function shape.
-type UtilizationShapePoint struct {
-	// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
-	Utilization int32
-	// Score assigned to a given utilization (y axis). Valid values are 0 to 10.
-	Score int32
-}
-
-// ResourceSpec represents a single resource.
-type ResourceSpec struct {
-	// Name of the resource.
-	Name string
-	// Weight of the resource.
-	Weight int64
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin.
-type VolumeBindingArgs struct {
-	metav1.TypeMeta
-
-	// BindTimeoutSeconds is the timeout in seconds in volume binding operation.
-	// Value must be a non-negative integer. The value zero indicates no waiting.
-	// If this value is nil, the default value will be used.
-	BindTimeoutSeconds int64
-
-	// Shape specifies the points defining the score function shape, which is
-	// used to score nodes based on the utilization of statically provisioned
-	// PVs. The utilization is calculated by dividing the total requested
-	// storage of the pod by the total capacity of feasible PVs on each node.
-	// Each point contains utilization (ranges from 0 to 100) and its
-	// associated score (ranges from 0 to 10). You can tune the priority by
-	// specifying different scores for different utilization numbers.
- // The default shape points are: - // 1) 0 for 0 utilization - // 2) 10 for 100 utilization - // All points must be sorted in increasing order by utilization. - // +featureGate=StorageCapacityScoring - // +optional - Shape []UtilizationShapePoint -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. -type NodeAffinityArgs struct { - metav1.TypeMeta - - // AddedAffinity is applied to all Pods additionally to the NodeAffinity - // specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity - // AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes - // match). - // When AddedAffinity is used, some Pods with affinity requirements that match - // a specific Node (such as Daemonset Pods) might remain unschedulable. - AddedAffinity *v1.NodeAffinity -} - -// ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin. -type ScoringStrategyType string - -const ( - // LeastAllocated strategy prioritizes nodes with least allocated resources. - LeastAllocated ScoringStrategyType = "LeastAllocated" - // MostAllocated strategy prioritizes nodes with most allocated resources. - MostAllocated ScoringStrategyType = "MostAllocated" - // RequestedToCapacityRatio strategy allows specifying a custom shape function - // to score nodes based on the request to capacity ratio. - RequestedToCapacityRatio ScoringStrategyType = "RequestedToCapacityRatio" -) - -// ScoringStrategy define ScoringStrategyType for node resource plugin -type ScoringStrategy struct { - // Type selects which strategy to run. - Type ScoringStrategyType - - // Resources to consider when scoring. - // The default resource set includes "cpu" and "memory" with an equal weight. - // Allowed weights go from 1 to 100. - // Weight defaults to 1 if not specified or explicitly set to 0. - Resources []ResourceSpec - - // Arguments specific to RequestedToCapacityRatio strategy. - RequestedToCapacityRatio *RequestedToCapacityRatioParam -} - -// RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters -type RequestedToCapacityRatioParam struct { - // Shape is a list of points defining the scoring function shape. - Shape []UtilizationShapePoint -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/conversion.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/conversion.go deleted file mode 100644 index 18115f93f49..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/conversion.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "fmt" - "sync" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - v1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -var ( - // pluginArgConversionScheme is a scheme with internal and v1 registered, - // used for defaulting/converting typed PluginConfig Args. - // Access via getPluginArgConversionScheme() - pluginArgConversionScheme *runtime.Scheme - initPluginArgConversionScheme sync.Once -) - -func GetPluginArgConversionScheme() *runtime.Scheme { - initPluginArgConversionScheme.Do(func() { - // set up the scheme used for plugin arg conversion - pluginArgConversionScheme = runtime.NewScheme() - utilruntime.Must(AddToScheme(pluginArgConversionScheme)) - utilruntime.Must(config.AddToScheme(pluginArgConversionScheme)) - }) - return pluginArgConversionScheme -} - -func Convert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToInternalPluginConfigArgs(out) -} - -// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal -// types using a scheme, after applying defaults. -func convertToInternalPluginConfigArgs(out *config.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - prof := &out.Profiles[i] - for j := range prof.PluginConfig { - args := prof.PluginConfig[j].Args - if args == nil { - continue - } - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - internalArgs, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion) - if err != nil { - return fmt.Errorf("converting .Profiles[%d].PluginConfig[%d].Args into internal type: %w", i, j, err) - } - prof.PluginConfig[j].Args = internalArgs - } - } - return nil -} - -func Convert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToExternalPluginConfigArgs(out) -} - -// convertToExternalPluginConfigArgs converts PluginConfig#Args into -// external (versioned) types using a scheme. -func convertToExternalPluginConfigArgs(out *v1.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - for j := range out.Profiles[i].PluginConfig { - args := out.Profiles[i].PluginConfig[j].Args - if args.Object == nil { - continue - } - if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown { - continue - } - externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion) - if err != nil { - return err - } - out.Profiles[i].PluginConfig[j].Args.Object = externalArgs - } - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go deleted file mode 100644 index baad8cd0f9d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - v1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/ptr" -) - -// getDefaultPlugins returns the default set of plugins. -func getDefaultPlugins() *v1.Plugins { - plugins := &v1.Plugins{ - MultiPoint: v1.PluginSet{ - Enabled: []v1.Plugin{ - {Name: names.SchedulingGates}, - {Name: names.PrioritySort}, - {Name: names.NodeUnschedulable}, - {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: ptr.To[int32](3)}, - {Name: names.NodeAffinity, Weight: ptr.To[int32](2)}, - {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)}, - {Name: names.VolumeRestrictions}, - {Name: names.NodeVolumeLimits}, - {Name: names.VolumeBinding}, - {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: ptr.To[int32](2)}, - {Name: names.InterPodAffinity, Weight: ptr.To[int32](2)}, - {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)}, - {Name: names.ImageLocality, Weight: ptr.To[int32](1)}, - {Name: names.DefaultBinder}, - }, - }, - } - applyFeatureGates(plugins) - - return plugins -} - -func applyFeatureGates(config *v1.Plugins) { - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { - // This plugin should come before DefaultPreemption because if - // there is a problem with a Pod and PostFilter gets called to - // resolve the problem, it is better to first deallocate an - // idle ResourceClaim than it is to evict some Pod that might - // be doing useful work. - for i := range config.MultiPoint.Enabled { - if config.MultiPoint.Enabled[i].Name == names.DefaultPreemption { - extended := make([]v1.Plugin, 0, len(config.MultiPoint.Enabled)+1) - extended = append(extended, config.MultiPoint.Enabled[:i]...) - extended = append(extended, v1.Plugin{Name: names.DynamicResources}) - extended = append(extended, config.MultiPoint.Enabled[i:]...) - config.MultiPoint.Enabled = extended - break - } - } - } -} - -// mergePlugins merges the custom set into the given default one, handling disabled sets. 
-func mergePlugins(logger klog.Logger, defaultPlugins, customPlugins *v1.Plugins) *v1.Plugins { - if customPlugins == nil { - return defaultPlugins - } - - defaultPlugins.MultiPoint = mergePluginSet(logger, defaultPlugins.MultiPoint, customPlugins.MultiPoint) - defaultPlugins.PreEnqueue = mergePluginSet(logger, defaultPlugins.PreEnqueue, customPlugins.PreEnqueue) - defaultPlugins.QueueSort = mergePluginSet(logger, defaultPlugins.QueueSort, customPlugins.QueueSort) - defaultPlugins.PreFilter = mergePluginSet(logger, defaultPlugins.PreFilter, customPlugins.PreFilter) - defaultPlugins.Filter = mergePluginSet(logger, defaultPlugins.Filter, customPlugins.Filter) - defaultPlugins.PostFilter = mergePluginSet(logger, defaultPlugins.PostFilter, customPlugins.PostFilter) - defaultPlugins.PreScore = mergePluginSet(logger, defaultPlugins.PreScore, customPlugins.PreScore) - defaultPlugins.Score = mergePluginSet(logger, defaultPlugins.Score, customPlugins.Score) - defaultPlugins.Reserve = mergePluginSet(logger, defaultPlugins.Reserve, customPlugins.Reserve) - defaultPlugins.Permit = mergePluginSet(logger, defaultPlugins.Permit, customPlugins.Permit) - defaultPlugins.PreBind = mergePluginSet(logger, defaultPlugins.PreBind, customPlugins.PreBind) - defaultPlugins.Bind = mergePluginSet(logger, defaultPlugins.Bind, customPlugins.Bind) - defaultPlugins.PostBind = mergePluginSet(logger, defaultPlugins.PostBind, customPlugins.PostBind) - return defaultPlugins -} - -type pluginIndex struct { - index int - plugin v1.Plugin -} - -func mergePluginSet(logger klog.Logger, defaultPluginSet, customPluginSet v1.PluginSet) v1.PluginSet { - disabledPlugins := sets.New[string]() - enabledCustomPlugins := make(map[string]pluginIndex) - // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. - replacedPluginIndex := sets.New[int]() - var disabled []v1.Plugin - for _, disabledPlugin := range customPluginSet.Disabled { - // if the user is manually disabling any (or all, with "*") default plugins for an extension point, - // we need to track that so that the MultiPoint extension logic in the framework can know to skip - // inserting unspecified default plugins to this point. - disabled = append(disabled, v1.Plugin{Name: disabledPlugin.Name}) - disabledPlugins.Insert(disabledPlugin.Name) - } - - // With MultiPoint, we may now have some disabledPlugins in the default registry - // For example, we enable PluginX with Filter+Score through MultiPoint but disable its Score plugin by default. - for _, disabledPlugin := range defaultPluginSet.Disabled { - disabled = append(disabled, v1.Plugin{Name: disabledPlugin.Name}) - disabledPlugins.Insert(disabledPlugin.Name) - } - - for index, enabledPlugin := range customPluginSet.Enabled { - enabledCustomPlugins[enabledPlugin.Name] = pluginIndex{index, enabledPlugin} - } - var enabledPlugins []v1.Plugin - if !disabledPlugins.Has("*") { - for _, defaultEnabledPlugin := range defaultPluginSet.Enabled { - if disabledPlugins.Has(defaultEnabledPlugin.Name) { - continue - } - // The default plugin is explicitly re-configured, update the default plugin accordingly. - if customPlugin, ok := enabledCustomPlugins[defaultEnabledPlugin.Name]; ok { - logger.Info("Default plugin is explicitly re-configured; overriding", "plugin", defaultEnabledPlugin.Name) - // Update the default plugin in place to preserve order. 
- defaultEnabledPlugin = customPlugin.plugin - replacedPluginIndex.Insert(customPlugin.index) - } - enabledPlugins = append(enabledPlugins, defaultEnabledPlugin) - } - } - - // Append all the custom plugins which haven't replaced any default plugins. - // Note: duplicated custom plugins will still be appended here. - // If so, the instantiation of scheduler framework will detect it and abort. - for index, plugin := range customPluginSet.Enabled { - if !replacedPluginIndex.Has(index) { - enabledPlugins = append(enabledPlugins, plugin) - } - } - return v1.PluginSet{Enabled: enabledPlugins, Disabled: disabled} -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go deleted file mode 100644 index 1310ea47a93..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/util/feature" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "k8s.io/klog/v2" - configv1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/utils/ptr" -) - -var defaultResourceSpec = []configv1.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, -} - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func pluginsNames(p *configv1.Plugins) []string { - if p == nil { - return nil - } - extensions := []configv1.PluginSet{ - p.MultiPoint, - p.PreFilter, - p.Filter, - p.PostFilter, - p.Reserve, - p.PreScore, - p.Score, - p.PreBind, - p.Bind, - p.PostBind, - p.Permit, - p.PreEnqueue, - p.QueueSort, - } - n := sets.New[string]() - for _, e := range extensions { - for _, pg := range e.Enabled { - n.Insert(pg.Name) - } - } - return sets.List(n) -} - -func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSchedulerProfile) { - // Set default plugins. - prof.Plugins = mergePlugins(logger, getDefaultPlugins(), prof.Plugins) - // Set default plugin configs. - scheme := GetPluginArgConversionScheme() - existingConfigs := sets.New[string]() - for j := range prof.PluginConfig { - existingConfigs.Insert(prof.PluginConfig[j].Name) - args := prof.PluginConfig[j].Args.Object - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - scheme.Default(args) - } - - // Append default configs for plugins that didn't have one explicitly set. - for _, name := range pluginsNames(prof.Plugins) { - if existingConfigs.Has(name) { - continue - } - gvk := configv1.SchemeGroupVersion.WithKind(name + "Args") - args, err := scheme.New(gvk) - if err != nil { - // This plugin is out-of-tree or doesn't require configuration. 
-			continue
-		}
-		scheme.Default(args)
-		args.GetObjectKind().SetGroupVersionKind(gvk)
-		prof.PluginConfig = append(prof.PluginConfig, configv1.PluginConfig{
-			Name: name,
-			Args: runtime.RawExtension{Object: args},
-		})
-	}
-}
-
-// SetDefaults_KubeSchedulerConfiguration sets additional defaults
-func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfiguration) {
-	logger := klog.TODO() // called by generated code that doesn't pass a logger. See #115724
-	if obj.Parallelism == nil {
-		obj.Parallelism = ptr.To[int32](16)
-	}
-
-	if len(obj.Profiles) == 0 {
-		obj.Profiles = append(obj.Profiles, configv1.KubeSchedulerProfile{})
-	}
-	// Only apply a default scheduler name when there is a single profile.
-	// Validation will ensure that every profile has a non-empty unique name.
-	if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil {
-		obj.Profiles[0].SchedulerName = ptr.To(v1.DefaultSchedulerName)
-	}
-
-	// Add the default set of plugins and apply the configuration.
-	for i := range obj.Profiles {
-		prof := &obj.Profiles[i]
-		setDefaults_KubeSchedulerProfile(logger, prof)
-	}
-
-	if obj.PercentageOfNodesToScore == nil {
-		obj.PercentageOfNodesToScore = ptr.To[int32](config.DefaultPercentageOfNodesToScore)
-	}
-
-	if len(obj.LeaderElection.ResourceLock) == 0 {
-		// Use lease-based leader election to reduce cost.
-		// We migrated for EndpointsLease lock in 1.17 and starting in 1.20 we
-		// migrated to Lease lock.
-		obj.LeaderElection.ResourceLock = "leases"
-	}
-	if len(obj.LeaderElection.ResourceNamespace) == 0 {
-		obj.LeaderElection.ResourceNamespace = configv1.SchedulerDefaultLockObjectNamespace
-	}
-	if len(obj.LeaderElection.ResourceName) == 0 {
-		obj.LeaderElection.ResourceName = configv1.SchedulerDefaultLockObjectName
-	}
-
-	if len(obj.ClientConnection.ContentType) == 0 {
-		obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf"
-	}
-	// Scheduler has an opinion about QPS/Burst, setting specific defaults for itself, instead of generic settings.
-	if obj.ClientConnection.QPS == 0.0 {
-		obj.ClientConnection.QPS = 50.0
-	}
-	if obj.ClientConnection.Burst == 0 {
-		obj.ClientConnection.Burst = 100
-	}
-
-	// Use the default LeaderElectionConfiguration options
-	componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection)
-
-	if obj.PodInitialBackoffSeconds == nil {
-		obj.PodInitialBackoffSeconds = ptr.To[int64](1)
-	}
-
-	if obj.PodMaxBackoffSeconds == nil {
-		obj.PodMaxBackoffSeconds = ptr.To[int64](10)
-	}
-
-	// Enable profiling by default in the scheduler
-	if obj.EnableProfiling == nil {
-		obj.EnableProfiling = ptr.To(true)
-	}
-
-	// Enable contention profiling by default if profiling is enabled
-	if *obj.EnableProfiling && obj.EnableContentionProfiling == nil {
-		obj.EnableContentionProfiling = ptr.To(true)
-	}
-}
-
-func SetDefaults_DefaultPreemptionArgs(obj *configv1.DefaultPreemptionArgs) {
-	if obj.MinCandidateNodesPercentage == nil {
-		obj.MinCandidateNodesPercentage = ptr.To[int32](10)
-	}
-	if obj.MinCandidateNodesAbsolute == nil {
-		obj.MinCandidateNodesAbsolute = ptr.To[int32](100)
-	}
-}
-
-func SetDefaults_InterPodAffinityArgs(obj *configv1.InterPodAffinityArgs) {
-	if obj.HardPodAffinityWeight == nil {
-		obj.HardPodAffinityWeight = ptr.To[int32](1)
-	}
-}
-
-func SetDefaults_VolumeBindingArgs(obj *configv1.VolumeBindingArgs) {
-	if obj.BindTimeoutSeconds == nil {
-		obj.BindTimeoutSeconds = ptr.To[int64](600)
-	}
-	if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring) {
-		obj.Shape = []configv1.UtilizationShapePoint{
-			{
-				Utilization: 0,
-				Score:       int32(config.MaxCustomPriorityScore),
-			},
-			{
-				Utilization: 100,
-				Score:       0,
-			},
-		}
-	}
-}
-
-func SetDefaults_NodeResourcesBalancedAllocationArgs(obj *configv1.NodeResourcesBalancedAllocationArgs) {
-	if len(obj.Resources) == 0 {
-		obj.Resources = defaultResourceSpec
-		return
-	}
-	// If the weight is not set or it is explicitly set to 0, then apply the default weight(1) instead.
-	for i := range obj.Resources {
-		if obj.Resources[i].Weight == 0 {
-			obj.Resources[i].Weight = 1
-		}
-	}
-}
-
-func SetDefaults_PodTopologySpreadArgs(obj *configv1.PodTopologySpreadArgs) {
-	if obj.DefaultingType == "" {
-		obj.DefaultingType = configv1.SystemDefaulting
-	}
-}
-
-func SetDefaults_NodeResourcesFitArgs(obj *configv1.NodeResourcesFitArgs) {
-	if obj.ScoringStrategy == nil {
-		obj.ScoringStrategy = &configv1.ScoringStrategy{
-			Type:      configv1.ScoringStrategyType(config.LeastAllocated),
-			Resources: defaultResourceSpec,
-		}
-	}
-	if len(obj.ScoringStrategy.Resources) == 0 {
-		// If no resources specified, use the default set.
-		obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...)
-	}
-	for i := range obj.ScoringStrategy.Resources {
-		if obj.ScoringStrategy.Resources[i].Weight == 0 {
-			obj.ScoringStrategy.Resources[i].Weight = 1
-		}
-	}
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go
deleted file mode 100644
index 4e2ff445697..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config
-// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1
-// +k8s:defaulter-gen=TypeMeta
-// +k8s:defaulter-gen-input=k8s.io/kube-scheduler/config/v1
-// +groupName=kubescheduler.config.k8s.io
-
-package v1
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go
deleted file mode 100644
index 9a32736c837..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	v1 "k8s.io/kube-scheduler/config/v1"
-)
-
-// GroupName is the group name used in this package
-const GroupName = v1.GroupName
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = v1.SchemeGroupVersion
-
-var (
-	// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
-	// defaulting and conversion init funcs are registered as well.
-	localSchemeBuilder = &v1.SchemeBuilder
-	// AddToScheme is a global function that registers this API group & version to a scheme
-	AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-func init() {
-	// We only register manually written functions here. The registration of the
-	// generated functions takes place in the generated files. The separation
-	// makes the code compile even when the generated files are missing.
-	localSchemeBuilder.Register(addDefaultingFuncs)
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go
deleted file mode 100644
index ab8518c8837..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go
+++ /dev/null
@@ -1,946 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1
-
-import (
-	unsafe "unsafe"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	conversion "k8s.io/apimachinery/pkg/conversion"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	v1alpha1 "k8s.io/component-base/config/v1alpha1"
-	configv1 "k8s.io/kube-scheduler/config/v1"
-	config "k8s.io/kubernetes/pkg/scheduler/apis/config"
-)
-
-func init() {
-	localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
-	if err := s.AddGeneratedConversionFunc((*configv1.DefaultPreemptionArgs)(nil), (*config.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(a.(*configv1.DefaultPreemptionArgs), b.(*config.DefaultPreemptionArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.DefaultPreemptionArgs)(nil), (*configv1.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(a.(*config.DefaultPreemptionArgs), b.(*configv1.DefaultPreemptionArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.Extender)(nil), (*config.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_Extender_To_config_Extender(a.(*configv1.Extender), b.(*config.Extender), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.Extender)(nil), (*configv1.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_Extender_To_v1_Extender(a.(*config.Extender), b.(*configv1.Extender), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.ExtenderManagedResource)(nil), (*config.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(a.(*configv1.ExtenderManagedResource), b.(*config.ExtenderManagedResource), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.ExtenderManagedResource)(nil), (*configv1.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(a.(*config.ExtenderManagedResource), b.(*configv1.ExtenderManagedResource), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.ExtenderTLSConfig)(nil), (*config.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(a.(*configv1.ExtenderTLSConfig), b.(*config.ExtenderTLSConfig), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.ExtenderTLSConfig)(nil), (*configv1.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(a.(*config.ExtenderTLSConfig), b.(*configv1.ExtenderTLSConfig), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.InterPodAffinityArgs)(nil), (*config.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(a.(*configv1.InterPodAffinityArgs), b.(*config.InterPodAffinityArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.InterPodAffinityArgs)(nil), (*configv1.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(a.(*config.InterPodAffinityArgs), b.(*configv1.InterPodAffinityArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.KubeSchedulerProfile)(nil), (*config.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(a.(*configv1.KubeSchedulerProfile), b.(*config.KubeSchedulerProfile), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerProfile)(nil), (*configv1.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*configv1.KubeSchedulerProfile), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*configv1.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.NodeAffinityArgs)(nil), (*configv1.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(a.(*config.NodeAffinityArgs), b.(*configv1.NodeAffinityArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.NodeResourcesBalancedAllocationArgs)(nil), (*config.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(a.(*configv1.NodeResourcesBalancedAllocationArgs), b.(*config.NodeResourcesBalancedAllocationArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.NodeResourcesBalancedAllocationArgs)(nil), (*configv1.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(a.(*config.NodeResourcesBalancedAllocationArgs), b.(*configv1.NodeResourcesBalancedAllocationArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.NodeResourcesFitArgs)(nil), (*config.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(a.(*configv1.NodeResourcesFitArgs), b.(*config.NodeResourcesFitArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.NodeResourcesFitArgs)(nil), (*configv1.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(a.(*config.NodeResourcesFitArgs), b.(*configv1.NodeResourcesFitArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.Plugin)(nil), (*config.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_Plugin_To_config_Plugin(a.(*configv1.Plugin), b.(*config.Plugin), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.Plugin)(nil), (*configv1.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_Plugin_To_v1_Plugin(a.(*config.Plugin), b.(*configv1.Plugin), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.PluginConfig)(nil), (*config.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_PluginConfig_To_config_PluginConfig(a.(*configv1.PluginConfig), b.(*config.PluginConfig), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.PluginConfig)(nil), (*configv1.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_PluginConfig_To_v1_PluginConfig(a.(*config.PluginConfig), b.(*configv1.PluginConfig), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.PluginSet)(nil), (*config.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_PluginSet_To_config_PluginSet(a.(*configv1.PluginSet), b.(*config.PluginSet), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.PluginSet)(nil), (*configv1.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_PluginSet_To_v1_PluginSet(a.(*config.PluginSet), b.(*configv1.PluginSet), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.Plugins)(nil), (*config.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_Plugins_To_config_Plugins(a.(*configv1.Plugins), b.(*config.Plugins), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.Plugins)(nil), (*configv1.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_Plugins_To_v1_Plugins(a.(*config.Plugins), b.(*configv1.Plugins), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.PodTopologySpreadArgs)(nil), (*config.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(a.(*configv1.PodTopologySpreadArgs), b.(*config.PodTopologySpreadArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.PodTopologySpreadArgs)(nil), (*configv1.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(a.(*config.PodTopologySpreadArgs), b.(*configv1.PodTopologySpreadArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.RequestedToCapacityRatioParam)(nil), (*config.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(a.(*configv1.RequestedToCapacityRatioParam), b.(*config.RequestedToCapacityRatioParam), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.RequestedToCapacityRatioParam)(nil), (*configv1.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(a.(*config.RequestedToCapacityRatioParam), b.(*configv1.RequestedToCapacityRatioParam), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.ResourceSpec)(nil), (*config.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_ResourceSpec_To_config_ResourceSpec(a.(*configv1.ResourceSpec), b.(*config.ResourceSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.ResourceSpec)(nil), (*configv1.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_ResourceSpec_To_v1_ResourceSpec(a.(*config.ResourceSpec), b.(*configv1.ResourceSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_ScoringStrategy_To_config_ScoringStrategy(a.(*configv1.ScoringStrategy), b.(*config.ScoringStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*configv1.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_ScoringStrategy_To_v1_ScoringStrategy(a.(*config.ScoringStrategy), b.(*configv1.ScoringStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*configv1.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.UtilizationShapePoint)(nil), (*configv1.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(a.(*config.UtilizationShapePoint), b.(*configv1.UtilizationShapePoint), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*configv1.VolumeBindingArgs)(nil), (*config.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(a.(*configv1.VolumeBindingArgs), b.(*config.VolumeBindingArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*config.VolumeBindingArgs)(nil), (*configv1.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(a.(*config.VolumeBindingArgs), b.(*configv1.VolumeBindingArgs), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*configv1.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*configv1.KubeSchedulerConfiguration), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddConversionFunc((*configv1.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*configv1.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope)
-	}); err != nil {
-		return err
-	}
-	return nil
-}
-
-func autoConvert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *configv1.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error {
-	if err := metav1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil {
-		return err
-	}
-	if err := metav1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs is an autogenerated conversion function.
-func Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *configv1.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error {
-	return autoConvert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in, out, s)
-}
-
-func autoConvert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *configv1.DefaultPreemptionArgs, s conversion.Scope) error {
-	if err := metav1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil {
-		return err
-	}
-	if err := metav1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs is an autogenerated conversion function.
-func Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *configv1.DefaultPreemptionArgs, s conversion.Scope) error {
-	return autoConvert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in, out, s)
-}
-
-func autoConvert_v1_Extender_To_config_Extender(in *configv1.Extender, out *config.Extender, s conversion.Scope) error {
-	out.URLPrefix = in.URLPrefix
-	out.FilterVerb = in.FilterVerb
-	out.PreemptVerb = in.PreemptVerb
-	out.PrioritizeVerb = in.PrioritizeVerb
-	out.Weight = in.Weight
-	out.BindVerb = in.BindVerb
-	out.EnableHTTPS = in.EnableHTTPS
-	out.TLSConfig = (*config.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig))
-	out.HTTPTimeout = in.HTTPTimeout
-	out.NodeCacheCapable = in.NodeCacheCapable
-	out.ManagedResources = *(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources))
-	out.Ignorable = in.Ignorable
-	return nil
-}
-
-// Convert_v1_Extender_To_config_Extender is an autogenerated conversion function.
-func Convert_v1_Extender_To_config_Extender(in *configv1.Extender, out *config.Extender, s conversion.Scope) error {
-	return autoConvert_v1_Extender_To_config_Extender(in, out, s)
-}
-
-func autoConvert_config_Extender_To_v1_Extender(in *config.Extender, out *configv1.Extender, s conversion.Scope) error {
-	out.URLPrefix = in.URLPrefix
-	out.FilterVerb = in.FilterVerb
-	out.PreemptVerb = in.PreemptVerb
-	out.PrioritizeVerb = in.PrioritizeVerb
-	out.Weight = in.Weight
-	out.BindVerb = in.BindVerb
-	out.EnableHTTPS = in.EnableHTTPS
-	out.TLSConfig = (*configv1.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig))
-	out.HTTPTimeout = in.HTTPTimeout
-	out.NodeCacheCapable = in.NodeCacheCapable
-	out.ManagedResources = *(*[]configv1.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources))
-	out.Ignorable = in.Ignorable
-	return nil
-}
-
-// Convert_config_Extender_To_v1_Extender is an autogenerated conversion function.
-func Convert_config_Extender_To_v1_Extender(in *config.Extender, out *configv1.Extender, s conversion.Scope) error {
-	return autoConvert_config_Extender_To_v1_Extender(in, out, s)
-}
-
-func autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *configv1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error {
-	out.Name = in.Name
-	out.IgnoredByScheduler = in.IgnoredByScheduler
-	return nil
-}
-
-// Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource is an autogenerated conversion function.
-func Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *configv1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error {
-	return autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in, out, s)
-}
-
-func autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *configv1.ExtenderManagedResource, s conversion.Scope) error {
-	out.Name = in.Name
-	out.IgnoredByScheduler = in.IgnoredByScheduler
-	return nil
-}
-
-// Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource is an autogenerated conversion function.
-func Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *configv1.ExtenderManagedResource, s conversion.Scope) error {
-	return autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in, out, s)
-}
-
-func autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *configv1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error {
-	out.Insecure = in.Insecure
-	out.ServerName = in.ServerName
-	out.CertFile = in.CertFile
-	out.KeyFile = in.KeyFile
-	out.CAFile = in.CAFile
-	out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData))
-	out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData))
-	out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData))
-	return nil
-}
-
-// Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig is an autogenerated conversion function.
-func Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *configv1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error {
-	return autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in, out, s)
-}
-
-func autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *configv1.ExtenderTLSConfig, s conversion.Scope) error {
-	out.Insecure = in.Insecure
-	out.ServerName = in.ServerName
-	out.CertFile = in.CertFile
-	out.KeyFile = in.KeyFile
-	out.CAFile = in.CAFile
-	out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData))
-	out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData))
-	out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData))
-	return nil
-}
-
-// Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig is an autogenerated conversion function.
-func Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *configv1.ExtenderTLSConfig, s conversion.Scope) error {
-	return autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in, out, s)
-}
-
-func autoConvert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *configv1.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error {
-	if err := metav1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil {
-		return err
-	}
-	out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods
-	return nil
-}
-
-// Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs is an autogenerated conversion function.
-func Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *configv1.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error {
-	return autoConvert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in, out, s)
-}
-
-func autoConvert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *configv1.InterPodAffinityArgs, s conversion.Scope) error {
-	if err := metav1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil {
-		return err
-	}
-	out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods
-	return nil
-}
-
-// Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs is an autogenerated conversion function.
-func Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *configv1.InterPodAffinityArgs, s conversion.Scope) error {
-	return autoConvert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in, out, s)
-}
-
-func autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *configv1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error {
-	if err := metav1.Convert_Pointer_int32_To_int32(&in.Parallelism, &out.Parallelism, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
-		return err
-	}
-	out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
-	if err := metav1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil {
-		return err
-	}
-	if err := metav1.Convert_Pointer_int64_To_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil {
-		return err
-	}
-	if in.Profiles != nil {
-		in, out := &in.Profiles, &out.Profiles
-		*out = make([]config.KubeSchedulerProfile, len(*in))
-		for i := range *in {
-			if err := Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Profiles = nil
-	}
-	out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders))
-	out.DelayCacheUntilActive = in.DelayCacheUntilActive
-	return nil
-}
-
-func autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *configv1.KubeSchedulerConfiguration, s conversion.Scope) error {
-	if err := metav1.Convert_int32_To_Pointer_int32(&in.Parallelism, &out.Parallelism, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
-		return err
-	}
-	out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
-	if err := metav1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil {
-		return err
-	}
-	if err := metav1.Convert_int64_To_Pointer_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil {
-		return err
-	}
-	if in.Profiles != nil {
-		in, out := &in.Profiles, &out.Profiles
-		*out = make([]configv1.KubeSchedulerProfile, len(*in))
-		for i := range *in {
-			if err := Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Profiles = nil
-	}
-	out.Extenders = *(*[]configv1.Extender)(unsafe.Pointer(&in.Extenders))
-	out.DelayCacheUntilActive = in.DelayCacheUntilActive
-	return nil
-}
-
-func autoConvert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *configv1.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error {
-	if err := metav1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil {
-		return err
-	}
-	out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
-	if in.Plugins != nil {
-		in, out := &in.Plugins, &out.Plugins
-		*out = new(config.Plugins)
-		if err := Convert_v1_Plugins_To_config_Plugins(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.Plugins = nil
-	}
-	if in.PluginConfig != nil {
-		in, out := &in.PluginConfig, &out.PluginConfig
-		*out = make([]config.PluginConfig, len(*in))
-		for i := range *in {
-			if err := Convert_v1_PluginConfig_To_config_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.PluginConfig = nil
-	}
-	return nil
-}
-
-// Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile is an autogenerated conversion function.
-func Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *configv1.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error {
-	return autoConvert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in, out, s)
-}
-
-func autoConvert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *configv1.KubeSchedulerProfile, s conversion.Scope) error {
-	if err := metav1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil {
-		return err
-	}
-	out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
-	if in.Plugins != nil {
-		in, out := &in.Plugins, &out.Plugins
-		*out = new(configv1.Plugins)
-		if err := Convert_config_Plugins_To_v1_Plugins(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.Plugins = nil
-	}
-	if in.PluginConfig != nil {
-		in, out := &in.PluginConfig, &out.PluginConfig
-		*out = make([]configv1.PluginConfig, len(*in))
-		for i := range *in {
-			if err := Convert_config_PluginConfig_To_v1_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.PluginConfig = nil
-	}
-	return nil
-}
-
-// Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile is an autogenerated conversion function.
-func Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *configv1.KubeSchedulerProfile, s conversion.Scope) error {
-	return autoConvert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in, out, s)
-}
-
-func autoConvert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in *configv1.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error {
-	out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity))
-	return nil
-}
-
-// Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs is an autogenerated conversion function.
-func Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in *configv1.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error {
-	return autoConvert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in, out, s)
-}
-
-func autoConvert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in *config.NodeAffinityArgs, out *configv1.NodeAffinityArgs, s conversion.Scope) error {
-	out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity))
-	return nil
-}
-
-// Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs is an autogenerated conversion function.
-func Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in *config.NodeAffinityArgs, out *configv1.NodeAffinityArgs, s conversion.Scope) error {
-	return autoConvert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in, out, s)
-}
-
-func autoConvert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *configv1.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	return nil
-}
-
-// Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function.
-func Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *configv1.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	return autoConvert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in, out, s)
-}
-
-func autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *configv1.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	out.Resources = *(*[]configv1.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	return nil
-}
-
-// Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function.
-func Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *configv1.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	return autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in, out, s)
-}
-
-func autoConvert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *configv1.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error {
-	out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources))
-	out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups))
-	out.ScoringStrategy = (*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))
-	return nil
-}
-
-// Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs is an autogenerated conversion function.
-func Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *configv1.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error {
-	return autoConvert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in, out, s)
-}
-
-func autoConvert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *configv1.NodeResourcesFitArgs, s conversion.Scope) error {
-	out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources))
-	out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups))
-	out.ScoringStrategy = (*configv1.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))
-	return nil
-}
-
-// Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs is an autogenerated conversion function.
-func Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *configv1.NodeResourcesFitArgs, s conversion.Scope) error {
-	return autoConvert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in, out, s)
-}
-
-func autoConvert_v1_Plugin_To_config_Plugin(in *configv1.Plugin, out *config.Plugin, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := metav1.Convert_Pointer_int32_To_int32(&in.Weight, &out.Weight, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1_Plugin_To_config_Plugin is an autogenerated conversion function.
-func Convert_v1_Plugin_To_config_Plugin(in *configv1.Plugin, out *config.Plugin, s conversion.Scope) error {
-	return autoConvert_v1_Plugin_To_config_Plugin(in, out, s)
-}
-
-func autoConvert_config_Plugin_To_v1_Plugin(in *config.Plugin, out *configv1.Plugin, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := metav1.Convert_int32_To_Pointer_int32(&in.Weight, &out.Weight, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_Plugin_To_v1_Plugin is an autogenerated conversion function.
-func Convert_config_Plugin_To_v1_Plugin(in *config.Plugin, out *configv1.Plugin, s conversion.Scope) error {
-	return autoConvert_config_Plugin_To_v1_Plugin(in, out, s)
-}
-
-func autoConvert_v1_PluginConfig_To_config_PluginConfig(in *configv1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1_PluginConfig_To_config_PluginConfig is an autogenerated conversion function.
-func Convert_v1_PluginConfig_To_config_PluginConfig(in *configv1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error {
-	return autoConvert_v1_PluginConfig_To_config_PluginConfig(in, out, s)
-}
-
-func autoConvert_config_PluginConfig_To_v1_PluginConfig(in *config.PluginConfig, out *configv1.PluginConfig, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_PluginConfig_To_v1_PluginConfig is an autogenerated conversion function.
-func Convert_config_PluginConfig_To_v1_PluginConfig(in *config.PluginConfig, out *configv1.PluginConfig, s conversion.Scope) error {
-	return autoConvert_config_PluginConfig_To_v1_PluginConfig(in, out, s)
-}
-
-func autoConvert_v1_PluginSet_To_config_PluginSet(in *configv1.PluginSet, out *config.PluginSet, s conversion.Scope) error {
-	if in.Enabled != nil {
-		in, out := &in.Enabled, &out.Enabled
-		*out = make([]config.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Enabled = nil
-	}
-	if in.Disabled != nil {
-		in, out := &in.Disabled, &out.Disabled
-		*out = make([]config.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Disabled = nil
-	}
-	return nil
-}
-
-// Convert_v1_PluginSet_To_config_PluginSet is an autogenerated conversion function.
-func Convert_v1_PluginSet_To_config_PluginSet(in *configv1.PluginSet, out *config.PluginSet, s conversion.Scope) error {
-	return autoConvert_v1_PluginSet_To_config_PluginSet(in, out, s)
-}
-
-func autoConvert_config_PluginSet_To_v1_PluginSet(in *config.PluginSet, out *configv1.PluginSet, s conversion.Scope) error {
-	if in.Enabled != nil {
-		in, out := &in.Enabled, &out.Enabled
-		*out = make([]configv1.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_config_Plugin_To_v1_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Enabled = nil
-	}
-	if in.Disabled != nil {
-		in, out := &in.Disabled, &out.Disabled
-		*out = make([]configv1.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_config_Plugin_To_v1_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Disabled = nil
-	}
-	return nil
-}
-
-// Convert_config_PluginSet_To_v1_PluginSet is an autogenerated conversion function.
-func Convert_config_PluginSet_To_v1_PluginSet(in *config.PluginSet, out *configv1.PluginSet, s conversion.Scope) error {
-	return autoConvert_config_PluginSet_To_v1_PluginSet(in, out, s)
-}
-
-func autoConvert_v1_Plugins_To_config_Plugins(in *configv1.Plugins, out *config.Plugins, s conversion.Scope) error {
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Filter, &out.Filter, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreScore, &out.PreScore, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Score, &out.Score, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Reserve, &out.Reserve, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Permit, &out.Permit, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreBind, &out.PreBind, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Bind, &out.Bind, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PostBind, &out.PostBind, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PluginSet_To_config_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1_Plugins_To_config_Plugins is an autogenerated conversion function.
-func Convert_v1_Plugins_To_config_Plugins(in *configv1.Plugins, out *config.Plugins, s conversion.Scope) error {
-	return autoConvert_v1_Plugins_To_config_Plugins(in, out, s)
-}
-
-func autoConvert_config_Plugins_To_v1_Plugins(in *config.Plugins, out *configv1.Plugins, s conversion.Scope) error {
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Filter, &out.Filter, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreScore, &out.PreScore, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Score, &out.Score, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Reserve, &out.Reserve, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Permit, &out.Permit, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreBind, &out.PreBind, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Bind, &out.Bind, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PostBind, &out.PostBind, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_Plugins_To_v1_Plugins is an autogenerated conversion function.
-func Convert_config_Plugins_To_v1_Plugins(in *config.Plugins, out *configv1.Plugins, s conversion.Scope) error {
-	return autoConvert_config_Plugins_To_v1_Plugins(in, out, s)
-}
-
-func autoConvert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *configv1.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error {
-	out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints))
-	out.DefaultingType = config.PodTopologySpreadConstraintsDefaulting(in.DefaultingType)
-	return nil
-}
-
-// Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs is an autogenerated conversion function.
-func Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *configv1.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error {
-	return autoConvert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in, out, s)
-}
-
-func autoConvert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *configv1.PodTopologySpreadArgs, s conversion.Scope) error {
-	out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints))
-	out.DefaultingType = configv1.PodTopologySpreadConstraintsDefaulting(in.DefaultingType)
-	return nil
-}
-
-// Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs is an autogenerated conversion function.
-func Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *configv1.PodTopologySpreadArgs, s conversion.Scope) error {
-	return autoConvert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in, out, s)
-}
-
-func autoConvert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *configv1.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam is an autogenerated conversion function.
-func Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *configv1.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	return autoConvert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in, out, s)
-}
-
-func autoConvert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *configv1.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	out.Shape = *(*[]configv1.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam is an autogenerated conversion function.
-func Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *configv1.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	return autoConvert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in, out, s)
-}
-
-func autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in *configv1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Weight = in.Weight
-	return nil
-}
-
-// Convert_v1_ResourceSpec_To_config_ResourceSpec is an autogenerated conversion function.
-func Convert_v1_ResourceSpec_To_config_ResourceSpec(in *configv1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error {
-	return autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in, out, s)
-}
-
-func autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *configv1.ResourceSpec, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Weight = in.Weight
-	return nil
-}
-
-// Convert_config_ResourceSpec_To_v1_ResourceSpec is an autogenerated conversion function.
-func Convert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *configv1.ResourceSpec, s conversion.Scope) error {
-	return autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in, out, s)
-}
-
-func autoConvert_v1_ScoringStrategy_To_config_ScoringStrategy(in *configv1.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
-	out.Type = config.ScoringStrategyType(in.Type)
-	out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	out.RequestedToCapacityRatio = (*config.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio))
-	return nil
-}
-
-// Convert_v1_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function.
-func Convert_v1_ScoringStrategy_To_config_ScoringStrategy(in *configv1.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
-	return autoConvert_v1_ScoringStrategy_To_config_ScoringStrategy(in, out, s)
-}
-
-func autoConvert_config_ScoringStrategy_To_v1_ScoringStrategy(in *config.ScoringStrategy, out *configv1.ScoringStrategy, s conversion.Scope) error {
-	out.Type = configv1.ScoringStrategyType(in.Type)
-	out.Resources = *(*[]configv1.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	out.RequestedToCapacityRatio = (*configv1.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio))
-	return nil
-}
-
-// Convert_config_ScoringStrategy_To_v1_ScoringStrategy is an autogenerated conversion function.
-func Convert_config_ScoringStrategy_To_v1_ScoringStrategy(in *config.ScoringStrategy, out *configv1.ScoringStrategy, s conversion.Scope) error {
-	return autoConvert_config_ScoringStrategy_To_v1_ScoringStrategy(in, out, s)
-}
-
-func autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *configv1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error {
-	out.Utilization = in.Utilization
-	out.Score = in.Score
-	return nil
-}
-
-// Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint is an autogenerated conversion function.
-func Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *configv1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error {
-	return autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in, out, s)
-}
-
-func autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *configv1.UtilizationShapePoint, s conversion.Scope) error {
-	out.Utilization = in.Utilization
-	out.Score = in.Score
-	return nil
-}
-
-// Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint is an autogenerated conversion function.
-func Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *configv1.UtilizationShapePoint, s conversion.Scope) error {
-	return autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in, out, s)
-}
-
-func autoConvert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in *configv1.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error {
-	if err := metav1.Convert_Pointer_int64_To_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil {
-		return err
-	}
-	out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs is an autogenerated conversion function.
-func Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in *configv1.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error {
-	return autoConvert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in, out, s)
-}
-
-func autoConvert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in *config.VolumeBindingArgs, out *configv1.VolumeBindingArgs, s conversion.Scope) error {
-	if err := metav1.Convert_int64_To_Pointer_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil {
-		return err
-	}
-	out.Shape = *(*[]configv1.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs is an autogenerated conversion function.
-func Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in *config.VolumeBindingArgs, out *configv1.VolumeBindingArgs, s conversion.Scope) error { - return autoConvert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in, out, s) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go deleted file mode 100644 index 87181b43048..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go deleted file mode 100644 index 9f86c91923f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - configv1 "k8s.io/kube-scheduler/config/v1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&configv1.DefaultPreemptionArgs{}, func(obj interface{}) { SetObjectDefaults_DefaultPreemptionArgs(obj.(*configv1.DefaultPreemptionArgs)) }) - scheme.AddTypeDefaultingFunc(&configv1.InterPodAffinityArgs{}, func(obj interface{}) { SetObjectDefaults_InterPodAffinityArgs(obj.(*configv1.InterPodAffinityArgs)) }) - scheme.AddTypeDefaultingFunc(&configv1.KubeSchedulerConfiguration{}, func(obj interface{}) { - SetObjectDefaults_KubeSchedulerConfiguration(obj.(*configv1.KubeSchedulerConfiguration)) - }) - scheme.AddTypeDefaultingFunc(&configv1.NodeResourcesBalancedAllocationArgs{}, func(obj interface{}) { - SetObjectDefaults_NodeResourcesBalancedAllocationArgs(obj.(*configv1.NodeResourcesBalancedAllocationArgs)) - }) - scheme.AddTypeDefaultingFunc(&configv1.NodeResourcesFitArgs{}, func(obj interface{}) { SetObjectDefaults_NodeResourcesFitArgs(obj.(*configv1.NodeResourcesFitArgs)) }) - scheme.AddTypeDefaultingFunc(&configv1.PodTopologySpreadArgs{}, func(obj interface{}) { SetObjectDefaults_PodTopologySpreadArgs(obj.(*configv1.PodTopologySpreadArgs)) }) - scheme.AddTypeDefaultingFunc(&configv1.VolumeBindingArgs{}, func(obj interface{}) { SetObjectDefaults_VolumeBindingArgs(obj.(*configv1.VolumeBindingArgs)) }) - return nil -} - -func SetObjectDefaults_DefaultPreemptionArgs(in *configv1.DefaultPreemptionArgs) { - SetDefaults_DefaultPreemptionArgs(in) -} - -func SetObjectDefaults_InterPodAffinityArgs(in *configv1.InterPodAffinityArgs) { - SetDefaults_InterPodAffinityArgs(in) -} - -func SetObjectDefaults_KubeSchedulerConfiguration(in *configv1.KubeSchedulerConfiguration) { - SetDefaults_KubeSchedulerConfiguration(in) -} - -func SetObjectDefaults_NodeResourcesBalancedAllocationArgs(in *configv1.NodeResourcesBalancedAllocationArgs) { - SetDefaults_NodeResourcesBalancedAllocationArgs(in) -} - -func SetObjectDefaults_NodeResourcesFitArgs(in *configv1.NodeResourcesFitArgs) { - SetDefaults_NodeResourcesFitArgs(in) -} - -func SetObjectDefaults_PodTopologySpreadArgs(in *configv1.PodTopologySpreadArgs) { - SetDefaults_PodTopologySpreadArgs(in) -} - -func SetObjectDefaults_VolumeBindingArgs(in *configv1.VolumeBindingArgs) { - SetDefaults_VolumeBindingArgs(in) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go deleted file mode 100644 index 2f67fa629c4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go +++ /dev/null @@ -1,296 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "fmt" - "reflect" - - v1 "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - componentbasevalidation "k8s.io/component-base/config/validation" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -// ValidateKubeSchedulerConfiguration ensures validation of the KubeSchedulerConfiguration struct -func ValidateKubeSchedulerConfiguration(cc *config.KubeSchedulerConfiguration) utilerrors.Aggregate { - var errs []error - errs = append(errs, componentbasevalidation.ValidateClientConnectionConfiguration(&cc.ClientConnection, field.NewPath("clientConnection")).ToAggregate()) - errs = append(errs, componentbasevalidation.ValidateLeaderElectionConfiguration(&cc.LeaderElection, field.NewPath("leaderElection")).ToAggregate()) - - // TODO: This can be removed when ResourceLock is not available - // Only ResourceLock values with leases are allowed - if cc.LeaderElection.LeaderElect && cc.LeaderElection.ResourceLock != "leases" { - leaderElectionPath := field.NewPath("leaderElection") - errs = append(errs, field.Invalid(leaderElectionPath.Child("resourceLock"), cc.LeaderElection.ResourceLock, `resourceLock value must be "leases"`)) - } - - profilesPath := field.NewPath("profiles") - if cc.Parallelism <= 0 { - errs = append(errs, field.Invalid(field.NewPath("parallelism"), cc.Parallelism, "should be an integer value greater than zero")) - } - - if len(cc.Profiles) == 0 { - errs = append(errs, field.Required(profilesPath, "")) - } else { - existingProfiles := make(map[string]int, len(cc.Profiles)) - for i := range cc.Profiles { - profile := &cc.Profiles[i] - path := profilesPath.Index(i) - errs = append(errs, validateKubeSchedulerProfile(path, cc.APIVersion, profile)...) - if idx, ok := existingProfiles[profile.SchedulerName]; ok { - errs = append(errs, field.Duplicate(path.Child("schedulerName"), profilesPath.Index(idx).Child("schedulerName"))) - } - existingProfiles[profile.SchedulerName] = i - } - errs = append(errs, validateCommonQueueSort(profilesPath, cc.Profiles)...) - } - - errs = append(errs, validatePercentageOfNodesToScore(field.NewPath("percentageOfNodesToScore"), cc.PercentageOfNodesToScore)) - - if cc.PodInitialBackoffSeconds <= 0 { - errs = append(errs, field.Invalid(field.NewPath("podInitialBackoffSeconds"), - cc.PodInitialBackoffSeconds, "must be greater than 0")) - } - if cc.PodMaxBackoffSeconds < cc.PodInitialBackoffSeconds { - errs = append(errs, field.Invalid(field.NewPath("podMaxBackoffSeconds"), - cc.PodMaxBackoffSeconds, "must be greater than or equal to PodInitialBackoffSeconds")) - } - - errs = append(errs, validateExtenders(field.NewPath("extenders"), cc.Extenders)...) - return utilerrors.Flatten(utilerrors.NewAggregate(errs)) -} - -func validatePercentageOfNodesToScore(path *field.Path, percentageOfNodesToScore *int32) error { - if percentageOfNodesToScore != nil { - if *percentageOfNodesToScore < 0 || *percentageOfNodesToScore > 100 { - return field.Invalid(path, *percentageOfNodesToScore, "not in valid range [0-100]") - } - } - return nil -} - -type invalidPlugins struct { - schemeGroupVersion string - plugins []string -} - -// invalidPluginsByVersion maintains a list of removed/deprecated plugins in each version. 
-// Remember to add an entry to that list when creating a new component config -// version (even if the list of invalid plugins is empty). -var invalidPluginsByVersion = []invalidPlugins{ - { - schemeGroupVersion: v1.SchemeGroupVersion.String(), - plugins: []string{ - "AzureDiskLimits", - "CinderLimits", - "EBSLimits", - "GCEPDLimits", - }, - }, -} - -// isPluginInvalid checks if a given plugin was removed/deprecated in the given component -// config version or earlier. -func isPluginInvalid(apiVersion string, name string) (bool, string) { - for _, dp := range invalidPluginsByVersion { - for _, plugin := range dp.plugins { - if name == plugin { - return true, dp.schemeGroupVersion - } - } - if apiVersion == dp.schemeGroupVersion { - break - } - } - return false, "" -} - -func validatePluginSetForInvalidPlugins(path *field.Path, apiVersion string, ps config.PluginSet) []error { - var errs []error - for i, plugin := range ps.Enabled { - if invalid, invalidVersion := isPluginInvalid(apiVersion, plugin.Name); invalid { - errs = append(errs, field.Invalid(path.Child("enabled").Index(i), plugin.Name, fmt.Sprintf("was invalid in version %q (KubeSchedulerConfiguration is version %q)", invalidVersion, apiVersion))) - } - } - return errs -} - -func validateKubeSchedulerProfile(path *field.Path, apiVersion string, profile *config.KubeSchedulerProfile) []error { - var errs []error - if len(profile.SchedulerName) == 0 { - errs = append(errs, field.Required(path.Child("schedulerName"), "")) - } - errs = append(errs, validatePercentageOfNodesToScore(path.Child("percentageOfNodesToScore"), profile.PercentageOfNodesToScore)) - errs = append(errs, validatePluginConfig(path, apiVersion, profile)...) - return errs -} - -func validatePluginConfig(path *field.Path, apiVersion string, profile *config.KubeSchedulerProfile) []error { - var errs []error - m := map[string]interface{}{ - "DefaultPreemption": ValidateDefaultPreemptionArgs, - "InterPodAffinity": ValidateInterPodAffinityArgs, - "NodeAffinity": ValidateNodeAffinityArgs, - "NodeResourcesBalancedAllocation": ValidateNodeResourcesBalancedAllocationArgs, - "NodeResourcesFitArgs": ValidateNodeResourcesFitArgs, - "PodTopologySpread": ValidatePodTopologySpreadArgs, - "VolumeBinding": ValidateVolumeBindingArgs, - } - - if profile.Plugins != nil { - stagesToPluginSet := map[string]config.PluginSet{ - "preEnqueue": profile.Plugins.PreEnqueue, - "queueSort": profile.Plugins.QueueSort, - "preFilter": profile.Plugins.PreFilter, - "filter": profile.Plugins.Filter, - "postFilter": profile.Plugins.PostFilter, - "preScore": profile.Plugins.PreScore, - "score": profile.Plugins.Score, - "reserve": profile.Plugins.Reserve, - "permit": profile.Plugins.Permit, - "preBind": profile.Plugins.PreBind, - "bind": profile.Plugins.Bind, - "postBind": profile.Plugins.PostBind, - } - - pluginsPath := path.Child("plugins") - for s, p := range stagesToPluginSet { - errs = append(errs, validatePluginSetForInvalidPlugins( - pluginsPath.Child(s), apiVersion, p)...) 
- } - } - - seenPluginConfig := sets.New[string]() - - for i := range profile.PluginConfig { - pluginConfigPath := path.Child("pluginConfig").Index(i) - name := profile.PluginConfig[i].Name - args := profile.PluginConfig[i].Args - if seenPluginConfig.Has(name) { - errs = append(errs, field.Duplicate(pluginConfigPath, name)) - } else { - seenPluginConfig.Insert(name) - } - if invalid, invalidVersion := isPluginInvalid(apiVersion, name); invalid { - errs = append(errs, field.Invalid(pluginConfigPath, name, fmt.Sprintf("was invalid in version %q (KubeSchedulerConfiguration is version %q)", invalidVersion, apiVersion))) - } else if validateFunc, ok := m[name]; ok { - // type mismatch, no need to validate the `args`. - if reflect.TypeOf(args) != reflect.ValueOf(validateFunc).Type().In(1) { - errs = append(errs, field.Invalid(pluginConfigPath.Child("args"), args, "has to match plugin args")) - } else { - in := []reflect.Value{reflect.ValueOf(pluginConfigPath.Child("args")), reflect.ValueOf(args)} - res := reflect.ValueOf(validateFunc).Call(in) - // It's possible that validation function return a Aggregate, just append here and it will be flattened at the end of CC validation. - if res[0].Interface() != nil { - errs = append(errs, res[0].Interface().(error)) - } - } - } - } - return errs -} - -func validateCommonQueueSort(path *field.Path, profiles []config.KubeSchedulerProfile) []error { - var errs []error - var canon config.PluginSet - var queueSortName string - var queueSortArgs runtime.Object - if profiles[0].Plugins != nil { - canon = profiles[0].Plugins.QueueSort - if len(profiles[0].Plugins.QueueSort.Enabled) != 0 { - queueSortName = profiles[0].Plugins.QueueSort.Enabled[0].Name - } - length := len(profiles[0].Plugins.QueueSort.Enabled) - if length > 1 { - errs = append(errs, field.Invalid(path.Index(0).Child("plugins", "queueSort", "Enabled"), length, "only one queue sort plugin can be enabled")) - } - } - for _, cfg := range profiles[0].PluginConfig { - if len(queueSortName) > 0 && cfg.Name == queueSortName { - queueSortArgs = cfg.Args - } - } - for i := 1; i < len(profiles); i++ { - var curr config.PluginSet - if profiles[i].Plugins != nil { - curr = profiles[i].Plugins.QueueSort - } - if !apiequality.Semantic.DeepEqual(canon, curr) { - errs = append(errs, field.Invalid(path.Index(i).Child("plugins", "queueSort"), curr, "queueSort must be the same for all profiles")) - } - for _, cfg := range profiles[i].PluginConfig { - if cfg.Name == queueSortName && !apiequality.Semantic.DeepEqual(queueSortArgs, cfg.Args) { - errs = append(errs, field.Invalid(path.Index(i).Child("pluginConfig", "args"), cfg.Args, "queueSort must be the same for all profiles")) - } - } - } - return errs -} - -// validateExtenders validates the configured extenders for the Scheduler -func validateExtenders(fldPath *field.Path, extenders []config.Extender) []error { - var errs []error - binders := 0 - extenderManagedResources := sets.New[string]() - for i, extender := range extenders { - path := fldPath.Index(i) - if len(extender.PrioritizeVerb) > 0 && extender.Weight <= 0 { - errs = append(errs, field.Invalid(path.Child("weight"), - extender.Weight, "must have a positive weight applied to it")) - } - if extender.BindVerb != "" { - binders++ - } - for j, resource := range extender.ManagedResources { - managedResourcesPath := path.Child("managedResources").Index(j) - validationErrors := validateExtendedResourceName(managedResourcesPath.Child("name"), v1.ResourceName(resource.Name)) - errs = append(errs, 
validationErrors...) - if extenderManagedResources.Has(resource.Name) { - errs = append(errs, field.Invalid(managedResourcesPath.Child("name"), - resource.Name, "duplicate extender managed resource name")) - } - extenderManagedResources.Insert(resource.Name) - } - } - if binders > 1 { - errs = append(errs, field.Invalid(fldPath, fmt.Sprintf("found %d extenders implementing bind", binders), "only one extender can implement bind")) - } - return errs -} - -// validateExtendedResourceName checks whether the specified name is a valid -// extended resource name. -func validateExtendedResourceName(path *field.Path, name v1.ResourceName) []error { - var validationErrors []error - for _, msg := range validation.IsQualifiedName(string(name)) { - validationErrors = append(validationErrors, field.Invalid(path, name, msg)) - } - if len(validationErrors) != 0 { - return validationErrors - } - if !v1helper.IsExtendedResourceName(name) { - validationErrors = append(validationErrors, field.Invalid(path, string(name), "is an invalid extended resource name")) - } - return validationErrors -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_pluginargs.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_pluginargs.go deleted file mode 100644 index e3797dc8734..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_pluginargs.go +++ /dev/null @@ -1,329 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "strings" - - v1 "k8s.io/api/core/v1" - metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" - "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -// supportedScoringStrategyTypes has to be a set of strings for use with field.Unsupported -var supportedScoringStrategyTypes = sets.New( - string(config.LeastAllocated), - string(config.MostAllocated), - string(config.RequestedToCapacityRatio), -) - -// ValidateDefaultPreemptionArgs validates that DefaultPreemptionArgs are correct. 
-func ValidateDefaultPreemptionArgs(path *field.Path, args *config.DefaultPreemptionArgs) error { - var allErrs field.ErrorList - percentagePath := path.Child("minCandidateNodesPercentage") - absolutePath := path.Child("minCandidateNodesAbsolute") - if err := validateMinCandidateNodesPercentage(args.MinCandidateNodesPercentage, percentagePath); err != nil { - allErrs = append(allErrs, err) - } - if err := validateMinCandidateNodesAbsolute(args.MinCandidateNodesAbsolute, absolutePath); err != nil { - allErrs = append(allErrs, err) - } - if args.MinCandidateNodesPercentage == 0 && args.MinCandidateNodesAbsolute == 0 { - allErrs = append(allErrs, - field.Invalid(percentagePath, args.MinCandidateNodesPercentage, "cannot be zero at the same time as minCandidateNodesAbsolute"), - field.Invalid(absolutePath, args.MinCandidateNodesAbsolute, "cannot be zero at the same time as minCandidateNodesPercentage")) - } - return allErrs.ToAggregate() -} - -// validateMinCandidateNodesPercentage validates that -// minCandidateNodesPercentage is within the allowed range. -func validateMinCandidateNodesPercentage(minCandidateNodesPercentage int32, p *field.Path) *field.Error { - if minCandidateNodesPercentage < 0 || minCandidateNodesPercentage > 100 { - return field.Invalid(p, minCandidateNodesPercentage, "not in valid range [0, 100]") - } - return nil -} - -// validateMinCandidateNodesAbsolute validates that minCandidateNodesAbsolute -// is within the allowed range. -func validateMinCandidateNodesAbsolute(minCandidateNodesAbsolute int32, p *field.Path) *field.Error { - if minCandidateNodesAbsolute < 0 { - return field.Invalid(p, minCandidateNodesAbsolute, "not in valid range [0, inf)") - } - return nil -} - -// ValidateInterPodAffinityArgs validates that InterPodAffinityArgs are correct. -func ValidateInterPodAffinityArgs(path *field.Path, args *config.InterPodAffinityArgs) error { - return validateHardPodAffinityWeight(path.Child("hardPodAffinityWeight"), args.HardPodAffinityWeight) -} - -// validateHardPodAffinityWeight validates that weight is within allowed range. -func validateHardPodAffinityWeight(path *field.Path, w int32) error { - const ( - minHardPodAffinityWeight = 0 - maxHardPodAffinityWeight = 100 - ) - - if w < minHardPodAffinityWeight || w > maxHardPodAffinityWeight { - msg := fmt.Sprintf("not in valid range [%d, %d]", minHardPodAffinityWeight, maxHardPodAffinityWeight) - return field.Invalid(path, w, msg) - } - return nil -} - -// ValidatePodTopologySpreadArgs validates that PodTopologySpreadArgs are correct. -// It replicates the validation from pkg/apis/core/validation.validateTopologySpreadConstraints -// with an additional check for .labelSelector to be nil. -func ValidatePodTopologySpreadArgs(path *field.Path, args *config.PodTopologySpreadArgs) error { - var allErrs field.ErrorList - if err := validateDefaultingType(path.Child("defaultingType"), args.DefaultingType, args.DefaultConstraints); err != nil { - allErrs = append(allErrs, err) - } - - defaultConstraintsPath := path.Child("defaultConstraints") - for i, c := range args.DefaultConstraints { - p := defaultConstraintsPath.Index(i) - if c.MaxSkew <= 0 { - f := p.Child("maxSkew") - allErrs = append(allErrs, field.Invalid(f, c.MaxSkew, "not in valid range (0, inf)")) - } - allErrs = append(allErrs, validateTopologyKey(p.Child("topologyKey"), c.TopologyKey)...) 
- if err := validateWhenUnsatisfiable(p.Child("whenUnsatisfiable"), c.WhenUnsatisfiable); err != nil { - allErrs = append(allErrs, err) - } - if c.LabelSelector != nil { - f := field.Forbidden(p.Child("labelSelector"), "constraint must not define a selector, as they deduced for each pod") - allErrs = append(allErrs, f) - } - if err := validateConstraintNotRepeat(defaultConstraintsPath, args.DefaultConstraints, i); err != nil { - allErrs = append(allErrs, err) - } - } - if len(allErrs) == 0 { - return nil - } - return allErrs.ToAggregate() -} - -func validateDefaultingType(p *field.Path, v config.PodTopologySpreadConstraintsDefaulting, constraints []v1.TopologySpreadConstraint) *field.Error { - if v != config.SystemDefaulting && v != config.ListDefaulting { - return field.NotSupported(p, v, []string{string(config.SystemDefaulting), string(config.ListDefaulting)}) - } - if v == config.SystemDefaulting && len(constraints) > 0 { - return field.Invalid(p, v, "when .defaultConstraints are not empty") - } - return nil -} - -func validateTopologyKey(p *field.Path, v string) field.ErrorList { - var allErrs field.ErrorList - if len(v) == 0 { - allErrs = append(allErrs, field.Required(p, "can not be empty")) - } else { - allErrs = append(allErrs, metav1validation.ValidateLabelName(v, p)...) - } - return allErrs -} - -func validateWhenUnsatisfiable(p *field.Path, v v1.UnsatisfiableConstraintAction) *field.Error { - supportedScheduleActions := sets.New(string(v1.DoNotSchedule), string(v1.ScheduleAnyway)) - - if len(v) == 0 { - return field.Required(p, "can not be empty") - } - if !supportedScheduleActions.Has(string(v)) { - return field.NotSupported(p, v, sets.List(supportedScheduleActions)) - } - return nil -} - -func validateConstraintNotRepeat(path *field.Path, constraints []v1.TopologySpreadConstraint, idx int) *field.Error { - c := &constraints[idx] - for i := range constraints[:idx] { - other := &constraints[i] - if c.TopologyKey == other.TopologyKey && c.WhenUnsatisfiable == other.WhenUnsatisfiable { - return field.Duplicate(path.Index(idx), fmt.Sprintf("{%v, %v}", c.TopologyKey, c.WhenUnsatisfiable)) - } - } - return nil -} - -func validateFunctionShape(shape []config.UtilizationShapePoint, path *field.Path) field.ErrorList { - const ( - minUtilization = 0 - maxUtilization = 100 - minScore = 0 - maxScore = int32(config.MaxCustomPriorityScore) - ) - - var allErrs field.ErrorList - - if len(shape) == 0 { - allErrs = append(allErrs, field.Required(path, "at least one point must be specified")) - return allErrs - } - - for i := 1; i < len(shape); i++ { - if shape[i-1].Utilization >= shape[i].Utilization { - allErrs = append(allErrs, field.Invalid(path.Index(i).Child("utilization"), shape[i].Utilization, "utilization values must be sorted in increasing order")) - break - } - } - - for i, point := range shape { - if point.Utilization < minUtilization || point.Utilization > maxUtilization { - msg := fmt.Sprintf("not in valid range [%d, %d]", minUtilization, maxUtilization) - allErrs = append(allErrs, field.Invalid(path.Index(i).Child("utilization"), point.Utilization, msg)) - } - - if point.Score < minScore || point.Score > maxScore { - msg := fmt.Sprintf("not in valid range [%d, %d]", minScore, maxScore) - allErrs = append(allErrs, field.Invalid(path.Index(i).Child("score"), point.Score, msg)) - } - } - - return allErrs -} - -func validateResources(resources []config.ResourceSpec, p *field.Path) field.ErrorList { - var allErrs field.ErrorList - for i, resource := range resources { - if 
resource.Weight <= 0 || resource.Weight > 100 { - msg := fmt.Sprintf("resource weight of %v not in valid range (0, 100]", resource.Name) - allErrs = append(allErrs, field.Invalid(p.Index(i).Child("weight"), resource.Weight, msg)) - } - } - return allErrs -} - -// ValidateNodeResourcesBalancedAllocationArgs validates that NodeResourcesBalancedAllocationArgs are set correctly. -func ValidateNodeResourcesBalancedAllocationArgs(path *field.Path, args *config.NodeResourcesBalancedAllocationArgs) error { - var allErrs field.ErrorList - seenResources := sets.New[string]() - for i, resource := range args.Resources { - if seenResources.Has(resource.Name) { - allErrs = append(allErrs, field.Duplicate(path.Child("resources").Index(i).Child("name"), resource.Name)) - } else { - seenResources.Insert(resource.Name) - } - if resource.Weight != 1 { - allErrs = append(allErrs, field.Invalid(path.Child("resources").Index(i).Child("weight"), resource.Weight, "must be 1")) - } - } - return allErrs.ToAggregate() -} - -// ValidateNodeAffinityArgs validates that NodeAffinityArgs are correct. -func ValidateNodeAffinityArgs(path *field.Path, args *config.NodeAffinityArgs) error { - if args.AddedAffinity == nil { - return nil - } - affinity := args.AddedAffinity - var errs []error - if ns := affinity.RequiredDuringSchedulingIgnoredDuringExecution; ns != nil { - _, err := nodeaffinity.NewNodeSelector(ns, field.WithPath(path.Child("addedAffinity", "requiredDuringSchedulingIgnoredDuringExecution"))) - if err != nil { - errs = append(errs, err) - } - } - // TODO: Add validation for requiredDuringSchedulingRequiredDuringExecution when it gets added to the API. - if terms := affinity.PreferredDuringSchedulingIgnoredDuringExecution; len(terms) != 0 { - _, err := nodeaffinity.NewPreferredSchedulingTerms(terms, field.WithPath(path.Child("addedAffinity", "preferredDuringSchedulingIgnoredDuringExecution"))) - if err != nil { - errs = append(errs, err) - } - } - return errors.Flatten(errors.NewAggregate(errs)) -} - -// VolumeBindingArgsValidationOptions contains the different settings for validation. -type VolumeBindingArgsValidationOptions struct { - AllowStorageCapacityScoring bool -} - -// ValidateVolumeBindingArgs validates that VolumeBindingArgs are set correctly. -func ValidateVolumeBindingArgs(path *field.Path, args *config.VolumeBindingArgs) error { - return ValidateVolumeBindingArgsWithOptions(path, args, VolumeBindingArgsValidationOptions{ - AllowStorageCapacityScoring: utilfeature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring), - }) -} - -// ValidateVolumeBindingArgsWithOptions validates that VolumeBindingArgs and VolumeBindingArgsValidationOptions with scheduler features. -func ValidateVolumeBindingArgsWithOptions(path *field.Path, args *config.VolumeBindingArgs, opts VolumeBindingArgsValidationOptions) error { - var allErrs field.ErrorList - - if args.BindTimeoutSeconds < 0 { - allErrs = append(allErrs, field.Invalid(path.Child("bindTimeoutSeconds"), args.BindTimeoutSeconds, "invalid BindTimeoutSeconds, should not be a negative value")) - } - - if opts.AllowStorageCapacityScoring { - allErrs = append(allErrs, validateFunctionShape(args.Shape, path.Child("shape"))...) - } else if args.Shape != nil { - // When the feature is off, return an error if the config is not nil. - // This prevents unexpected configuration from taking effect when the - // feature turns on in the future. 
- allErrs = append(allErrs, field.Invalid(path.Child("shape"), args.Shape, "unexpected field `shape`, remove it or turn on the feature gate StorageCapacityScoring")) - } - return allErrs.ToAggregate() -} - -func ValidateNodeResourcesFitArgs(path *field.Path, args *config.NodeResourcesFitArgs) error { - var allErrs field.ErrorList - resPath := path.Child("ignoredResources") - for i, res := range args.IgnoredResources { - path := resPath.Index(i) - if errs := metav1validation.ValidateLabelName(res, path); len(errs) != 0 { - allErrs = append(allErrs, errs...) - } - } - - groupPath := path.Child("ignoredResourceGroups") - for i, group := range args.IgnoredResourceGroups { - path := groupPath.Index(i) - if strings.Contains(group, "/") { - allErrs = append(allErrs, field.Invalid(path, group, "resource group name can't contain '/'")) - } - if errs := metav1validation.ValidateLabelName(group, path); len(errs) != 0 { - allErrs = append(allErrs, errs...) - } - } - - strategyPath := path.Child("scoringStrategy") - if args.ScoringStrategy != nil { - if !supportedScoringStrategyTypes.Has(string(args.ScoringStrategy.Type)) { - allErrs = append(allErrs, field.NotSupported(strategyPath.Child("type"), args.ScoringStrategy.Type, sets.List(supportedScoringStrategyTypes))) - } - allErrs = append(allErrs, validateResources(args.ScoringStrategy.Resources, strategyPath.Child("resources"))...) - if args.ScoringStrategy.RequestedToCapacityRatio != nil { - allErrs = append(allErrs, validateFunctionShape(args.ScoringStrategy.RequestedToCapacityRatio.Shape, strategyPath.Child("shape"))...) - } - } - - if len(allErrs) == 0 { - return nil - } - return allErrs.ToAggregate() -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go deleted file mode 100644 index f5baa62218c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go +++ /dev/null @@ -1,562 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package config - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DefaultPreemptionArgs) DeepCopyInto(out *DefaultPreemptionArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPreemptionArgs. -func (in *DefaultPreemptionArgs) DeepCopy() *DefaultPreemptionArgs { - if in == nil { - return nil - } - out := new(DefaultPreemptionArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *DefaultPreemptionArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Extender) DeepCopyInto(out *Extender) { - *out = *in - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(ExtenderTLSConfig) - (*in).DeepCopyInto(*out) - } - out.HTTPTimeout = in.HTTPTimeout - if in.ManagedResources != nil { - in, out := &in.ManagedResources, &out.ManagedResources - *out = make([]ExtenderManagedResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. -func (in *Extender) DeepCopy() *Extender { - if in == nil { - return nil - } - out := new(Extender) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. -func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { - if in == nil { - return nil - } - out := new(ExtenderManagedResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { - *out = *in - if in.CertData != nil { - in, out := &in.CertData, &out.CertData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CAData != nil { - in, out := &in.CAData, &out.CAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. -func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { - if in == nil { - return nil - } - out := new(ExtenderTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs. -func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs { - if in == nil { - return nil - } - out := new(InterPodAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - out.LeaderElection = in.LeaderElection - out.ClientConnection = in.ClientConnection - out.DebuggingConfiguration = in.DebuggingConfiguration - if in.PercentageOfNodesToScore != nil { - in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore - *out = new(int32) - **out = **in - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]KubeSchedulerProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Extenders != nil { - in, out := &in.Extenders, &out.Extenders - *out = make([]Extender, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. -func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { - if in == nil { - return nil - } - out := new(KubeSchedulerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { - *out = *in - if in.PercentageOfNodesToScore != nil { - in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore - *out = new(int32) - **out = **in - } - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(Plugins) - (*in).DeepCopyInto(*out) - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]PluginConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. -func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { - if in == nil { - return nil - } - out := new(KubeSchedulerProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAffinityArgs) DeepCopyInto(out *NodeAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.AddedAffinity != nil { - in, out := &in.AddedAffinity, &out.AddedAffinity - *out = new(v1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinityArgs. -func (in *NodeAffinityArgs) DeepCopy() *NodeAffinityArgs { - if in == nil { - return nil - } - out := new(NodeAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesBalancedAllocationArgs. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopy() *NodeResourcesBalancedAllocationArgs { - if in == nil { - return nil - } - out := new(NodeResourcesBalancedAllocationArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.IgnoredResources != nil { - in, out := &in.IgnoredResources, &out.IgnoredResources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IgnoredResourceGroups != nil { - in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ScoringStrategy != nil { - in, out := &in.ScoringStrategy, &out.ScoringStrategy - *out = new(ScoringStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs. -func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs { - if in == nil { - return nil - } - out := new(NodeResourcesFitArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugin) DeepCopyInto(out *Plugin) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. -func (in *Plugin) DeepCopy() *Plugin { - if in == nil { - return nil - } - out := new(Plugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { - *out = *in - if in.Args != nil { - out.Args = in.Args.DeepCopyObject() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. -func (in *PluginConfig) DeepCopy() *PluginConfig { - if in == nil { - return nil - } - out := new(PluginConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PluginSet) DeepCopyInto(out *PluginSet) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]Plugin, len(*in)) - copy(*out, *in) - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]Plugin, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. -func (in *PluginSet) DeepCopy() *PluginSet { - if in == nil { - return nil - } - out := new(PluginSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugins) DeepCopyInto(out *Plugins) { - *out = *in - in.PreEnqueue.DeepCopyInto(&out.PreEnqueue) - in.QueueSort.DeepCopyInto(&out.QueueSort) - in.PreFilter.DeepCopyInto(&out.PreFilter) - in.Filter.DeepCopyInto(&out.Filter) - in.PostFilter.DeepCopyInto(&out.PostFilter) - in.PreScore.DeepCopyInto(&out.PreScore) - in.Score.DeepCopyInto(&out.Score) - in.Reserve.DeepCopyInto(&out.Reserve) - in.Permit.DeepCopyInto(&out.Permit) - in.PreBind.DeepCopyInto(&out.PreBind) - in.Bind.DeepCopyInto(&out.Bind) - in.PostBind.DeepCopyInto(&out.PostBind) - in.MultiPoint.DeepCopyInto(&out.MultiPoint) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. -func (in *Plugins) DeepCopy() *Plugins { - if in == nil { - return nil - } - out := new(Plugins) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DefaultConstraints != nil { - in, out := &in.DefaultConstraints, &out.DefaultConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs. -func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs { - if in == nil { - return nil - } - out := new(PodTopologySpreadArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) { - *out = *in - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioParam. -func (in *RequestedToCapacityRatioParam) DeepCopy() *RequestedToCapacityRatioParam { - if in == nil { - return nil - } - out := new(RequestedToCapacityRatioParam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. 
-func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - if in.RequestedToCapacityRatio != nil { - in, out := &in.RequestedToCapacityRatio, &out.RequestedToCapacityRatio - *out = new(RequestedToCapacityRatioParam) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy. -func (in *ScoringStrategy) DeepCopy() *ScoringStrategy { - if in == nil { - return nil - } - out := new(ScoringStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. -func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { - if in == nil { - return nil - } - out := new(UtilizationShapePoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs. -func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs { - if in == nil { - return nil - } - out := new(VolumeBindingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/cache.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/cache.go deleted file mode 100644 index 97d54011051..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/cache.go +++ /dev/null @@ -1,768 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cache - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/metrics" -) - -var ( - cleanAssumedPeriod = 1 * time.Second -) - -// New returns a Cache implementation. -// It automatically starts a go routine that manages expiration of assumed pods. -// "ttl" is how long the assumed pod will get expired. -// "ctx" is the context that would close the background goroutine. -func New(ctx context.Context, ttl time.Duration) Cache { - logger := klog.FromContext(ctx) - cache := newCache(ctx, ttl, cleanAssumedPeriod) - cache.run(logger) - return cache -} - -// nodeInfoListItem holds a NodeInfo pointer and acts as an item in a doubly -// linked list. When a NodeInfo is updated, it goes to the head of the list. -// The items closer to the head are the most recently updated items. -type nodeInfoListItem struct { - info *framework.NodeInfo - next *nodeInfoListItem - prev *nodeInfoListItem -} - -type cacheImpl struct { - stop <-chan struct{} - ttl time.Duration - period time.Duration - - // This mutex guards all fields within this cache struct. - mu sync.RWMutex - // a set of assumed pod keys. - // The key could further be used to get an entry in podStates. - assumedPods sets.Set[string] - // a map from pod key to podState. - podStates map[string]*podState - nodes map[string]*nodeInfoListItem - // headNode points to the most recently updated NodeInfo in "nodes". It is the - // head of the linked list. - headNode *nodeInfoListItem - nodeTree *nodeTree - // A map from image name to its ImageStateSummary. - imageStates map[string]*framework.ImageStateSummary -} - -type podState struct { - pod *v1.Pod - // Used by assumedPod to determinate expiration. - // If deadline is nil, assumedPod will never expire. - deadline *time.Time - // Used to block cache from expiring assumedPod if binding still runs - bindingFinished bool -} - -func newCache(ctx context.Context, ttl, period time.Duration) *cacheImpl { - logger := klog.FromContext(ctx) - return &cacheImpl{ - ttl: ttl, - period: period, - stop: ctx.Done(), - - nodes: make(map[string]*nodeInfoListItem), - nodeTree: newNodeTree(logger, nil), - assumedPods: sets.New[string](), - podStates: make(map[string]*podState), - imageStates: make(map[string]*framework.ImageStateSummary), - } -} - -// newNodeInfoListItem initializes a new nodeInfoListItem. -func newNodeInfoListItem(ni *framework.NodeInfo) *nodeInfoListItem { - return &nodeInfoListItem{ - info: ni, - } -} - -// moveNodeInfoToHead moves a NodeInfo to the head of "cache.nodes" doubly -// linked list. The head is the most recently updated NodeInfo. -// We assume cache lock is already acquired. -func (cache *cacheImpl) moveNodeInfoToHead(logger klog.Logger, name string) { - ni, ok := cache.nodes[name] - if !ok { - logger.Error(nil, "No node info with given name found in the cache", "node", klog.KRef("", name)) - return - } - // if the node info list item is already at the head, we are done. - if ni == cache.headNode { - return - } - - if ni.prev != nil { - ni.prev.next = ni.next - } - if ni.next != nil { - ni.next.prev = ni.prev - } - if cache.headNode != nil { - cache.headNode.prev = ni - } - ni.next = cache.headNode - ni.prev = nil - cache.headNode = ni -} - -// removeNodeInfoFromList removes a NodeInfo from the "cache.nodes" doubly -// linked list. 
-// We assume cache lock is already acquired. -func (cache *cacheImpl) removeNodeInfoFromList(logger klog.Logger, name string) { - ni, ok := cache.nodes[name] - if !ok { - logger.Error(nil, "No node info with given name found in the cache", "node", klog.KRef("", name)) - return - } - - if ni.prev != nil { - ni.prev.next = ni.next - } - if ni.next != nil { - ni.next.prev = ni.prev - } - // if the removed item was at the head, we must update the head. - if ni == cache.headNode { - cache.headNode = ni.next - } - delete(cache.nodes, name) -} - -// Dump produces a dump of the current scheduler cache. This is used for -// debugging purposes only and shouldn't be confused with UpdateSnapshot -// function. -// This method is expensive, and should be only used in non-critical path. -func (cache *cacheImpl) Dump() *Dump { - cache.mu.RLock() - defer cache.mu.RUnlock() - - nodes := make(map[string]*framework.NodeInfo, len(cache.nodes)) - for k, v := range cache.nodes { - nodes[k] = v.info.Snapshot() - } - - return &Dump{ - Nodes: nodes, - AssumedPods: cache.assumedPods.Union(nil), - } -} - -// UpdateSnapshot takes a snapshot of cached NodeInfo map. This is called at -// beginning of every scheduling cycle. -// The snapshot only includes Nodes that are not deleted at the time this function is called. -// nodeInfo.Node() is guaranteed to be not nil for all the nodes in the snapshot. -// This function tracks generation number of NodeInfo and updates only the -// entries of an existing snapshot that have changed after the snapshot was taken. -func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapshot) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - // Get the last generation of the snapshot. - snapshotGeneration := nodeSnapshot.generation - - // NodeInfoList and HavePodsWithAffinityNodeInfoList must be re-created if a node was added - // or removed from the cache. - updateAllLists := false - // HavePodsWithAffinityNodeInfoList must be re-created if a node changed its - // status from having pods with affinity to NOT having pods with affinity or the other - // way around. - updateNodesHavePodsWithAffinity := false - // HavePodsWithRequiredAntiAffinityNodeInfoList must be re-created if a node changed its - // status from having pods with required anti-affinity to NOT having pods with required - // anti-affinity or the other way around. - updateNodesHavePodsWithRequiredAntiAffinity := false - // usedPVCSet must be re-created whenever the head node generation is greater than - // last snapshot generation. - updateUsedPVCSet := false - - // Start from the head of the NodeInfo doubly linked list and update snapshot - // of NodeInfos updated after the last snapshot. - for node := cache.headNode; node != nil; node = node.next { - if node.info.Generation <= snapshotGeneration { - // all the nodes are updated before the existing snapshot. We are done. - break - } - if np := node.info.Node(); np != nil { - existing, ok := nodeSnapshot.nodeInfoMap[np.Name] - if !ok { - updateAllLists = true - existing = &framework.NodeInfo{} - nodeSnapshot.nodeInfoMap[np.Name] = existing - } - clone := node.info.Snapshot() - // We track nodes that have pods with affinity, here we check if this node changed its - // status from having pods with affinity to NOT having pods with affinity or the other - // way around. 
- if (len(existing.PodsWithAffinity) > 0) != (len(clone.PodsWithAffinity) > 0) { - updateNodesHavePodsWithAffinity = true - } - if (len(existing.PodsWithRequiredAntiAffinity) > 0) != (len(clone.PodsWithRequiredAntiAffinity) > 0) { - updateNodesHavePodsWithRequiredAntiAffinity = true - } - if !updateUsedPVCSet { - if len(existing.PVCRefCounts) != len(clone.PVCRefCounts) { - updateUsedPVCSet = true - } else { - for pvcKey := range clone.PVCRefCounts { - if _, found := existing.PVCRefCounts[pvcKey]; !found { - updateUsedPVCSet = true - break - } - } - } - } - // We need to preserve the original pointer of the NodeInfo struct since it - // is used in the NodeInfoList, which we may not update. - *existing = *clone - } - } - // Update the snapshot generation with the latest NodeInfo generation. - if cache.headNode != nil { - nodeSnapshot.generation = cache.headNode.info.Generation - } - - // Comparing to pods in nodeTree. - // Deleted nodes get removed from the tree, but they might remain in the nodes map - // if they still have non-deleted Pods. - if len(nodeSnapshot.nodeInfoMap) > cache.nodeTree.numNodes { - cache.removeDeletedNodesFromSnapshot(nodeSnapshot) - updateAllLists = true - } - - if updateAllLists || updateNodesHavePodsWithAffinity || updateNodesHavePodsWithRequiredAntiAffinity || updateUsedPVCSet { - cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, updateAllLists) - } - - if len(nodeSnapshot.nodeInfoList) != cache.nodeTree.numNodes { - errMsg := fmt.Sprintf("snapshot state is not consistent, length of NodeInfoList=%v not equal to length of nodes in tree=%v "+ - ", length of NodeInfoMap=%v, length of nodes in cache=%v"+ - ", trying to recover", - len(nodeSnapshot.nodeInfoList), cache.nodeTree.numNodes, - len(nodeSnapshot.nodeInfoMap), len(cache.nodes)) - logger.Error(nil, errMsg) - // We will try to recover by re-creating the lists for the next scheduling cycle, but still return an - // error to surface the problem, the error will likely cause a failure to the current scheduling cycle. 
- cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, true) - return errors.New(errMsg) - } - - return nil -} - -func (cache *cacheImpl) updateNodeInfoSnapshotList(logger klog.Logger, snapshot *Snapshot, updateAll bool) { - snapshot.havePodsWithAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes) - snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes) - snapshot.usedPVCSet = sets.New[string]() - if updateAll { - // Take a snapshot of the nodes order in the tree - snapshot.nodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes) - nodesList, err := cache.nodeTree.list() - if err != nil { - logger.Error(err, "Error occurred while retrieving the list of names of the nodes from node tree") - } - for _, nodeName := range nodesList { - if nodeInfo := snapshot.nodeInfoMap[nodeName]; nodeInfo != nil { - snapshot.nodeInfoList = append(snapshot.nodeInfoList, nodeInfo) - if len(nodeInfo.PodsWithAffinity) > 0 { - snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, nodeInfo) - } - if len(nodeInfo.PodsWithRequiredAntiAffinity) > 0 { - snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, nodeInfo) - } - for key := range nodeInfo.PVCRefCounts { - snapshot.usedPVCSet.Insert(key) - } - } else { - logger.Error(nil, "Node exists in nodeTree but not in NodeInfoMap, this should not happen", "node", klog.KRef("", nodeName)) - } - } - } else { - for _, nodeInfo := range snapshot.nodeInfoList { - if len(nodeInfo.PodsWithAffinity) > 0 { - snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, nodeInfo) - } - if len(nodeInfo.PodsWithRequiredAntiAffinity) > 0 { - snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, nodeInfo) - } - for key := range nodeInfo.PVCRefCounts { - snapshot.usedPVCSet.Insert(key) - } - } - } -} - -// If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot. -func (cache *cacheImpl) removeDeletedNodesFromSnapshot(snapshot *Snapshot) { - toDelete := len(snapshot.nodeInfoMap) - cache.nodeTree.numNodes - for name := range snapshot.nodeInfoMap { - if toDelete <= 0 { - break - } - if n, ok := cache.nodes[name]; !ok || n.info.Node() == nil { - delete(snapshot.nodeInfoMap, name) - toDelete-- - } - } -} - -// NodeCount returns the number of nodes in the cache. -// DO NOT use outside of tests. -func (cache *cacheImpl) NodeCount() int { - cache.mu.RLock() - defer cache.mu.RUnlock() - return len(cache.nodes) -} - -// PodCount returns the number of pods in the cache (including those from deleted nodes). -// DO NOT use outside of tests. -func (cache *cacheImpl) PodCount() (int, error) { - cache.mu.RLock() - defer cache.mu.RUnlock() - // podFilter is expected to return true for most or all of the pods. We - // can avoid expensive array growth without wasting too much memory by - // pre-allocating capacity. 
- count := 0 - for _, n := range cache.nodes { - count += len(n.info.Pods) - } - return count, nil -} - -func (cache *cacheImpl) AssumePod(logger klog.Logger, pod *v1.Pod) error { - key, err := framework.GetPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - if _, ok := cache.podStates[key]; ok { - return fmt.Errorf("pod %v(%v) is in the cache, so can't be assumed", key, klog.KObj(pod)) - } - - return cache.addPod(logger, pod, true) -} - -func (cache *cacheImpl) FinishBinding(logger klog.Logger, pod *v1.Pod) error { - return cache.finishBinding(logger, pod, time.Now()) -} - -// finishBinding exists to make tests deterministic by injecting now as an argument -func (cache *cacheImpl) finishBinding(logger klog.Logger, pod *v1.Pod, now time.Time) error { - key, err := framework.GetPodKey(pod) - if err != nil { - return err - } - - cache.mu.RLock() - defer cache.mu.RUnlock() - - logger.V(5).Info("Finished binding for pod, can be expired", "podKey", key, "pod", klog.KObj(pod)) - currState, ok := cache.podStates[key] - if ok && cache.assumedPods.Has(key) { - if cache.ttl == time.Duration(0) { - currState.deadline = nil - } else { - dl := now.Add(cache.ttl) - currState.deadline = &dl - } - currState.bindingFinished = true - } - return nil -} - -func (cache *cacheImpl) ForgetPod(logger klog.Logger, pod *v1.Pod) error { - key, err := framework.GetPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - if ok && currState.pod.Spec.NodeName != pod.Spec.NodeName { - return fmt.Errorf("pod %v(%v) was assumed on %v but assigned to %v", key, klog.KObj(pod), pod.Spec.NodeName, currState.pod.Spec.NodeName) - } - - // Only assumed pod can be forgotten. - if ok && cache.assumedPods.Has(key) { - return cache.removePod(logger, pod) - } - return fmt.Errorf("pod %v(%v) wasn't assumed so cannot be forgotten", key, klog.KObj(pod)) -} - -// Assumes that lock is already acquired. -func (cache *cacheImpl) addPod(logger klog.Logger, pod *v1.Pod, assumePod bool) error { - key, err := framework.GetPodKey(pod) - if err != nil { - return err - } - n, ok := cache.nodes[pod.Spec.NodeName] - if !ok { - n = newNodeInfoListItem(framework.NewNodeInfo()) - cache.nodes[pod.Spec.NodeName] = n - } - n.info.AddPod(pod) - cache.moveNodeInfoToHead(logger, pod.Spec.NodeName) - ps := &podState{ - pod: pod, - } - cache.podStates[key] = ps - if assumePod { - cache.assumedPods.Insert(key) - } - return nil -} - -// Assumes that lock is already acquired. -func (cache *cacheImpl) updatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error { - if err := cache.removePod(logger, oldPod); err != nil { - return err - } - return cache.addPod(logger, newPod, false) -} - -// Assumes that lock is already acquired. -// Removes a pod from the cached node info. If the node information was already -// removed and there are no more pods left in the node, cleans up the node from -// the cache. 
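The assume/expire bookkeeping above reduces to two rules: FinishBinding stamps a deadline once binding is done (a zero TTL means assumed pods never expire), and the periodic cleanup pass later in the file drops assumed pods whose deadline has passed. A reduced sketch with hypothetical names:

    package cacheexample

    import "time"

    type podState struct {
    	bindingFinished bool
    	deadline        *time.Time
    }

    // finishBinding stamps the moment after which an assumed pod may be
    // expired; ttl == 0 disables expiry, as in the code above.
    func finishBinding(ps *podState, now time.Time, ttl time.Duration) {
    	ps.bindingFinished = true
    	if ttl == 0 {
    		ps.deadline = nil
    		return
    	}
    	dl := now.Add(ttl)
    	ps.deadline = &dl
    }

    // expired reports whether the cleanup pass should drop the assumed pod.
    func expired(ps *podState, now time.Time) bool {
    	return ps.bindingFinished && ps.deadline != nil && now.After(*ps.deadline)
    }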
-func (cache *cacheImpl) removePod(logger klog.Logger, pod *v1.Pod) error { - key, err := framework.GetPodKey(pod) - if err != nil { - return err - } - - n, ok := cache.nodes[pod.Spec.NodeName] - if !ok { - logger.Error(nil, "Node not found when trying to remove pod", "node", klog.KRef("", pod.Spec.NodeName), "podKey", key, "pod", klog.KObj(pod)) - } else { - if err := n.info.RemovePod(logger, pod); err != nil { - return err - } - if len(n.info.Pods) == 0 && n.info.Node() == nil { - cache.removeNodeInfoFromList(logger, pod.Spec.NodeName) - } else { - cache.moveNodeInfoToHead(logger, pod.Spec.NodeName) - } - } - - delete(cache.podStates, key) - delete(cache.assumedPods, key) - return nil -} - -func (cache *cacheImpl) AddPod(logger klog.Logger, pod *v1.Pod) error { - key, err := framework.GetPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - switch { - case ok && cache.assumedPods.Has(key): - // When assuming, we've already added the Pod to cache, - // Just update here to make sure the Pod's status is up-to-date. - if err = cache.updatePod(logger, currState.pod, pod); err != nil { - logger.Error(err, "Error occurred while updating pod") - } - if currState.pod.Spec.NodeName != pod.Spec.NodeName { - // The pod was added to a different node than it was assumed to. - logger.Info("Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName)) - return nil - } - case !ok: - // Pod was expired. We should add it back. - if err = cache.addPod(logger, pod, false); err != nil { - logger.Error(err, "Error occurred while adding pod") - } - default: - return fmt.Errorf("pod %v(%v) was already in added state", key, klog.KObj(pod)) - } - return nil -} - -func (cache *cacheImpl) UpdatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error { - key, err := framework.GetPodKey(oldPod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - if !ok { - return fmt.Errorf("pod %v(%v) is not added to scheduler cache, so cannot be updated", key, klog.KObj(oldPod)) - } - - // An assumed pod won't have Update/Remove event. It needs to have Add event - // before Update event, in which case the state would change from Assumed to Added. 
- if cache.assumedPods.Has(key) { - return fmt.Errorf("assumed pod %v(%v) should not be updated", key, klog.KObj(oldPod)) - } - - if currState.pod.Spec.NodeName != newPod.Spec.NodeName { - logger.Error(nil, "Pod updated on a different node than previously added to", "podKey", key, "pod", klog.KObj(oldPod)) - logger.Error(nil, "scheduler cache is corrupted and can badly affect scheduling decisions") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } - return cache.updatePod(logger, oldPod, newPod) -} - -func (cache *cacheImpl) RemovePod(logger klog.Logger, pod *v1.Pod) error { - key, err := framework.GetPodKey(pod) - if err != nil { - return err - } - - cache.mu.Lock() - defer cache.mu.Unlock() - - currState, ok := cache.podStates[key] - if !ok { - return fmt.Errorf("pod %v(%v) is not found in scheduler cache, so cannot be removed from it", key, klog.KObj(pod)) - } - if currState.pod.Spec.NodeName != pod.Spec.NodeName { - logger.Error(nil, "Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName)) - if pod.Spec.NodeName != "" { - // An empty NodeName is possible when the scheduler misses a Delete - // event and it gets the last known state from the informer cache. - logger.Error(nil, "scheduler cache is corrupted and can badly affect scheduling decisions") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } - } - return cache.removePod(logger, currState.pod) -} - -func (cache *cacheImpl) IsAssumedPod(pod *v1.Pod) (bool, error) { - key, err := framework.GetPodKey(pod) - if err != nil { - return false, err - } - - cache.mu.RLock() - defer cache.mu.RUnlock() - - return cache.assumedPods.Has(key), nil -} - -// GetPod might return a pod for which its node has already been deleted from -// the main cache. This is useful to properly process pod update events. -func (cache *cacheImpl) GetPod(pod *v1.Pod) (*v1.Pod, error) { - key, err := framework.GetPodKey(pod) - if err != nil { - return nil, err - } - - cache.mu.RLock() - defer cache.mu.RUnlock() - - podState, ok := cache.podStates[key] - if !ok { - return nil, fmt.Errorf("pod %v(%v) does not exist in scheduler cache", key, klog.KObj(pod)) - } - - return podState.pod, nil -} - -func (cache *cacheImpl) AddNode(logger klog.Logger, node *v1.Node) *framework.NodeInfo { - cache.mu.Lock() - defer cache.mu.Unlock() - - n, ok := cache.nodes[node.Name] - if !ok { - n = newNodeInfoListItem(framework.NewNodeInfo()) - cache.nodes[node.Name] = n - } else { - cache.removeNodeImageStates(n.info.Node()) - } - cache.moveNodeInfoToHead(logger, node.Name) - - cache.nodeTree.addNode(logger, node) - cache.addNodeImageStates(node, n.info) - n.info.SetNode(node) - return n.info.Snapshot() -} - -func (cache *cacheImpl) UpdateNode(logger klog.Logger, oldNode, newNode *v1.Node) *framework.NodeInfo { - cache.mu.Lock() - defer cache.mu.Unlock() - n, ok := cache.nodes[newNode.Name] - if !ok { - n = newNodeInfoListItem(framework.NewNodeInfo()) - cache.nodes[newNode.Name] = n - cache.nodeTree.addNode(logger, newNode) - } else { - cache.removeNodeImageStates(n.info.Node()) - } - cache.moveNodeInfoToHead(logger, newNode.Name) - - cache.nodeTree.updateNode(logger, oldNode, newNode) - cache.addNodeImageStates(newNode, n.info) - n.info.SetNode(newNode) - return n.info.Snapshot() -} - -// RemoveNode removes a node from the cache's tree. -// The node might still have pods because their deletion events didn't arrive -// yet. 
Those pods are considered removed from the cache, being the node tree -// the source of truth. -// However, we keep a ghost node with the list of pods until all pod deletion -// events have arrived. A ghost node is skipped from snapshots. -func (cache *cacheImpl) RemoveNode(logger klog.Logger, node *v1.Node) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - n, ok := cache.nodes[node.Name] - if !ok { - return fmt.Errorf("node %v is not found", node.Name) - } - n.info.RemoveNode() - // We remove NodeInfo for this node only if there aren't any pods on this node. - // We can't do it unconditionally, because notifications about pods are delivered - // in a different watch, and thus can potentially be observed later, even though - // they happened before node removal. - if len(n.info.Pods) == 0 { - cache.removeNodeInfoFromList(logger, node.Name) - } else { - cache.moveNodeInfoToHead(logger, node.Name) - } - if err := cache.nodeTree.removeNode(logger, node); err != nil { - return err - } - cache.removeNodeImageStates(node) - return nil -} - -// addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in -// scheduler cache. This function assumes the lock to scheduler cache has been acquired. -func (cache *cacheImpl) addNodeImageStates(node *v1.Node, nodeInfo *framework.NodeInfo) { - newSum := make(map[string]*framework.ImageStateSummary) - - for _, image := range node.Status.Images { - for _, name := range image.Names { - // update the entry in imageStates - state, ok := cache.imageStates[name] - if !ok { - state = &framework.ImageStateSummary{ - Size: image.SizeBytes, - Nodes: sets.New(node.Name), - } - cache.imageStates[name] = state - } else { - state.Nodes.Insert(node.Name) - } - // create the ImageStateSummary for this image - if _, ok := newSum[name]; !ok { - newSum[name] = state - } - } - } - nodeInfo.ImageStates = newSum -} - -// removeNodeImageStates removes the given node record from image entries having the node -// in imageStates cache. After the removal, if any image becomes free, i.e., the image -// is no longer available on any node, the image entry will be removed from imageStates. -func (cache *cacheImpl) removeNodeImageStates(node *v1.Node) { - if node == nil { - return - } - - for _, image := range node.Status.Images { - for _, name := range image.Names { - state, ok := cache.imageStates[name] - if ok { - state.Nodes.Delete(node.Name) - if state.Nodes.Len() == 0 { - // Remove the unused image to make sure the length of - // imageStates represents the total number of different - // images on all nodes - delete(cache.imageStates, name) - } - } - } - } -} - -func (cache *cacheImpl) run(logger klog.Logger) { - go wait.Until(func() { - cache.cleanupAssumedPods(logger, time.Now()) - }, cache.period, cache.stop) -} - -// cleanupAssumedPods exists for making test deterministic by taking time as input argument. -// It also reports metrics on the cache size for nodes, pods, and assumed pods. 
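addNodeImageStates and removeNodeImageStates above maintain an inverted index from image name to the set of nodes holding the image, deleting an entry as soon as its set becomes empty so the map length always equals the number of distinct images in the cluster. The same idea in isolation (hypothetical types):

    package cacheexample

    // imageIndex maps an image name to the set of nodes that have it pulled.
    type imageIndex map[string]map[string]struct{}

    func (idx imageIndex) addNode(node string, images []string) {
    	for _, img := range images {
    		if idx[img] == nil {
    			idx[img] = make(map[string]struct{})
    		}
    		idx[img][node] = struct{}{}
    	}
    }

    func (idx imageIndex) removeNode(node string, images []string) {
    	for _, img := range images {
    		nodes, ok := idx[img]
    		if !ok {
    			continue
    		}
    		delete(nodes, node)
    		if len(nodes) == 0 {
    			delete(idx, img) // image no longer present on any node
    		}
    	}
    }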
-func (cache *cacheImpl) cleanupAssumedPods(logger klog.Logger, now time.Time) { - cache.mu.Lock() - defer cache.mu.Unlock() - defer cache.updateMetrics() - - // The size of assumedPods should be small - for key := range cache.assumedPods { - ps, ok := cache.podStates[key] - if !ok { - logger.Error(nil, "Key found in assumed set but not in podStates, potentially a logical error") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } - if !ps.bindingFinished { - logger.V(5).Info("Could not expire cache for pod as binding is still in progress", "podKey", key, "pod", klog.KObj(ps.pod)) - continue - } - if cache.ttl != 0 && now.After(*ps.deadline) { - logger.Info("Pod expired", "podKey", key, "pod", klog.KObj(ps.pod)) - if err := cache.removePod(logger, ps.pod); err != nil { - logger.Error(err, "ExpirePod failed", "podKey", key, "pod", klog.KObj(ps.pod)) - } - } - } -} - -// updateMetrics updates cache size metric values for pods, assumed pods, and nodes -func (cache *cacheImpl) updateMetrics() { - metrics.CacheSize.WithLabelValues("assumed_pods").Set(float64(len(cache.assumedPods))) - metrics.CacheSize.WithLabelValues("pods").Set(float64(len(cache.podStates))) - metrics.CacheSize.WithLabelValues("nodes").Set(float64(len(cache.nodes))) - - // we intentionally keep them with the deprecation and will remove at v1.34. - //nolint:staticcheck - metrics.SchedulerCacheSize.WithLabelValues("assumed_pods").Set(float64(len(cache.assumedPods))) - //nolint:staticcheck - metrics.SchedulerCacheSize.WithLabelValues("pods").Set(float64(len(cache.podStates))) - //nolint:staticcheck - metrics.SchedulerCacheSize.WithLabelValues("nodes").Set(float64(len(cache.nodes))) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/comparer.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/comparer.go deleted file mode 100644 index cbba8865d9d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/comparer.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package debugger - -import ( - "sort" - "strings" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/klog/v2" - internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache" - internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// CacheComparer is an implementation of the Scheduler's cache comparer. -type CacheComparer struct { - NodeLister corelisters.NodeLister - PodLister corelisters.PodLister - Cache internalcache.Cache - PodQueue internalqueue.SchedulingQueue -} - -// Compare compares the nodes and pods of NodeLister with Cache.Snapshot. 
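The comparer that follows reduces the question "does the cache match the apiserver?" to a diff of two sorted string slices in one two-pointer pass (compareStrings below). That pass on its own, with hypothetical names:

    package cacheexample

    import (
    	"sort"
    	"strings"
    )

    // diffSorted returns the strings present only in want (missed) and only
    // in got (redundant), mirroring compareStrings below.
    func diffSorted(want, got []string) (missed, redundant []string) {
    	sort.Strings(want)
    	sort.Strings(got)
    	i, j := 0, 0
    	for i < len(want) || j < len(got) {
    		switch {
    		case j == len(got):
    			missed = append(missed, want[i])
    			i++
    		case i == len(want):
    			redundant = append(redundant, got[j])
    			j++
    		default:
    			switch strings.Compare(want[i], got[j]) {
    			case 0:
    				i++
    				j++
    			case -1:
    				missed = append(missed, want[i])
    				i++
    			case 1:
    				redundant = append(redundant, got[j])
    				j++
    			}
    		}
    	}
    	return missed, redundant
    }

For example, diffSorted([]string{"a", "b"}, []string{"b", "c"}) yields missed ["a"] and redundant ["c"].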
-func (c *CacheComparer) Compare(logger klog.Logger) error { - logger.V(3).Info("Cache comparer started") - defer logger.V(3).Info("Cache comparer finished") - - nodes, err := c.NodeLister.List(labels.Everything()) - if err != nil { - return err - } - - pods, err := c.PodLister.List(labels.Everything()) - if err != nil { - return err - } - - dump := c.Cache.Dump() - - pendingPods, _ := c.PodQueue.PendingPods() - - if missed, redundant := c.CompareNodes(nodes, dump.Nodes); len(missed)+len(redundant) != 0 { - logger.Info("Cache mismatch", "missedNodes", missed, "redundantNodes", redundant) - } - - if missed, redundant := c.ComparePods(pods, pendingPods, dump.Nodes); len(missed)+len(redundant) != 0 { - logger.Info("Cache mismatch", "missedPods", missed, "redundantPods", redundant) - } - - return nil -} - -// CompareNodes compares actual nodes with cached nodes. -func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) { - actual := []string{} - for _, node := range nodes { - actual = append(actual, node.Name) - } - - cached := []string{} - for nodeName := range nodeinfos { - cached = append(cached, nodeName) - } - - return compareStrings(actual, cached) -} - -// ComparePods compares actual pods with cached pods. -func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) { - actual := []string{} - for _, pod := range pods { - actual = append(actual, string(pod.UID)) - } - - cached := []string{} - for _, nodeinfo := range nodeinfos { - for _, p := range nodeinfo.Pods { - cached = append(cached, string(p.Pod.UID)) - } - } - for _, pod := range waitingPods { - cached = append(cached, string(pod.UID)) - } - - return compareStrings(actual, cached) -} - -func compareStrings(actual, cached []string) (missed, redundant []string) { - missed, redundant = []string{}, []string{} - - sort.Strings(actual) - sort.Strings(cached) - - compare := func(i, j int) int { - if i == len(actual) { - return 1 - } else if j == len(cached) { - return -1 - } - return strings.Compare(actual[i], cached[j]) - } - - for i, j := 0, 0; i < len(actual) || j < len(cached); { - switch compare(i, j) { - case 0: - i++ - j++ - case -1: - missed = append(missed, actual[i]) - i++ - case 1: - redundant = append(redundant, cached[j]) - j++ - } - } - - return -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/debugger.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/debugger.go deleted file mode 100644 index 5f0bc8a2c4a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/debugger.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package debugger - -import ( - "context" - "os" - "os/signal" - - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/klog/v2" - internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache" - internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue" -) - -// CacheDebugger provides ways to check and write cache information for debugging. -type CacheDebugger struct { - Comparer CacheComparer - Dumper CacheDumper -} - -// New creates a CacheDebugger. -func New( - nodeLister corelisters.NodeLister, - podLister corelisters.PodLister, - cache internalcache.Cache, - podQueue internalqueue.SchedulingQueue, -) *CacheDebugger { - return &CacheDebugger{ - Comparer: CacheComparer{ - NodeLister: nodeLister, - PodLister: podLister, - Cache: cache, - PodQueue: podQueue, - }, - Dumper: CacheDumper{ - cache: cache, - podQueue: podQueue, - }, - } -} - -// ListenForSignal starts a goroutine that will trigger the CacheDebugger's -// behavior when the process receives SIGINT (Windows) or SIGUSER2 (non-Windows). -func (d *CacheDebugger) ListenForSignal(ctx context.Context) { - logger := klog.FromContext(ctx) - stopCh := ctx.Done() - ch := make(chan os.Signal, 1) - signal.Notify(ch, compareSignal) - - go func() { - for { - select { - case <-stopCh: - return - case <-ch: - d.Comparer.Compare(logger) - d.Dumper.DumpAll(logger) - } - } - }() -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/dumper.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/dumper.go deleted file mode 100644 index 96288a501d1..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/dumper.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package debugger - -import ( - "fmt" - "strings" - - "k8s.io/klog/v2" - - v1 "k8s.io/api/core/v1" - internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache" - "k8s.io/kubernetes/pkg/scheduler/backend/queue" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// CacheDumper writes some information from the scheduler cache and the scheduling queue to the -// scheduler logs for debugging purposes. -type CacheDumper struct { - cache internalcache.Cache - podQueue queue.SchedulingQueue -} - -// DumpAll writes cached nodes and scheduling queue information to the scheduler logs. -func (d *CacheDumper) DumpAll(logger klog.Logger) { - d.dumpNodes(logger) - d.dumpSchedulingQueue(logger) -} - -// dumpNodes writes NodeInfo to the scheduler logs. -func (d *CacheDumper) dumpNodes(logger klog.Logger) { - dump := d.cache.Dump() - nodeInfos := make([]string, 0, len(dump.Nodes)) - for name, nodeInfo := range dump.Nodes { - nodeInfos = append(nodeInfos, d.printNodeInfo(name, nodeInfo)) - } - // Extra blank line added between node entries for readability. - logger.Info("Dump of cached NodeInfo", "nodes", strings.Join(nodeInfos, "\n\n")) -} - -// dumpSchedulingQueue writes pods in the scheduling queue to the scheduler logs. 
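ListenForSignal above is the usual signal-to-callback loop; signal.go and signal_windows.go below pick SIGUSR2 or os.Interrupt per platform through build tags. A standalone version of the loop (hypothetical name, non-Windows only):

    package cacheexample

    import (
    	"context"
    	"os"
    	"os/signal"
    	"syscall"
    )

    // listenForDump invokes dump every time SIGUSR2 arrives, until ctx ends.
    func listenForDump(ctx context.Context, dump func()) {
    	ch := make(chan os.Signal, 1)
    	signal.Notify(ch, syscall.SIGUSR2)
    	go func() {
    		for {
    			select {
    			case <-ctx.Done():
    				return
    			case <-ch:
    				dump()
    			}
    		}
    	}()
    }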
-func (d *CacheDumper) dumpSchedulingQueue(logger klog.Logger) { - pendingPods, s := d.podQueue.PendingPods() - var podData strings.Builder - for _, p := range pendingPods { - podData.WriteString(printPod(p)) - } - logger.Info("Dump of scheduling queue", "summary", s, "pods", podData.String()) -} - -// printNodeInfo writes parts of NodeInfo to a string. -func (d *CacheDumper) printNodeInfo(name string, n *framework.NodeInfo) string { - var nodeData strings.Builder - nodeData.WriteString(fmt.Sprintf("Node name: %s\nDeleted: %t\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n", - name, n.Node() == nil, n.Requested, n.Allocatable, len(n.Pods))) - // Dumping Pod Info - for _, p := range n.Pods { - nodeData.WriteString(printPod(p.Pod)) - } - // Dumping nominated pods info on the node - nominatedPodInfos := d.podQueue.NominatedPodsForNode(name) - if len(nominatedPodInfos) != 0 { - nodeData.WriteString(fmt.Sprintf("Nominated Pods(number: %v):\n", len(nominatedPodInfos))) - for _, pi := range nominatedPodInfos { - nodeData.WriteString(printPod(pi.Pod)) - } - } - return nodeData.String() -} - -// printPod writes parts of a Pod object to a string. -func printPod(p *v1.Pod) string { - return fmt.Sprintf("name: %v, namespace: %v, uid: %v, phase: %v, nominated node: %v\n", p.Name, p.Namespace, p.UID, p.Status.Phase, p.Status.NominatedNodeName) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/signal.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/signal.go deleted file mode 100644 index df5c83cea0d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/signal.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows -// +build !windows - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package debugger - -import "syscall" - -// compareSignal is the signal to trigger cache compare. For non-windows -// environment it's SIGUSR2. -var compareSignal = syscall.SIGUSR2 diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/signal_windows.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/signal_windows.go deleted file mode 100644 index 25c015b0e17..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger/signal_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package debugger - -import "os" - -// compareSignal is the signal to trigger cache compare. For windows, -// it's SIGINT. -var compareSignal = os.Interrupt diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/interface.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/interface.go deleted file mode 100644 index 24b7fd49066..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/interface.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// Cache collects pods' information and provides node-level aggregated information. -// It's intended for generic scheduler to do efficient lookup. -// Cache's operations are pod centric. It does incremental updates based on pod events. -// Pod events are sent via network. We don't have guaranteed delivery of all events: -// We use Reflector to list and watch from remote. -// Reflector might be slow and do a relist, which would lead to missing events. -// -// State Machine of a pod's events in scheduler's cache: -// -// +-------------------------------------------+ +----+ -// | Add | | | -// | | | | Update -// + Assume Add v v | -// -// Initial +--------> Assumed +------------+---> Added <--+ -// -// ^ + + | + -// | | | | | -// | | | Add | | Remove -// | | | | | -// | | | + | -// +----------------+ +-----------> Expired +----> Deleted -// Forget Expire -// -// Note that an assumed pod can expire, because if we haven't received Add event notifying us -// for a while, there might be some problems and we shouldn't keep the pod in cache anymore. -// -// Note that "Initial", "Expired", and "Deleted" pods do not actually exist in cache. -// Based on existing use cases, we are making the following assumptions: -// - No pod would be assumed twice -// - A pod could be added without going through scheduler. In this case, we will see Add but not Assume event. -// - If a pod wasn't added, it wouldn't be removed or updated. -// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, -// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. -type Cache interface { - // NodeCount returns the number of nodes in the cache. - // DO NOT use outside of tests. - NodeCount() int - - // PodCount returns the number of pods in the cache (including those from deleted nodes). - // DO NOT use outside of tests. - PodCount() (int, error) - - // AssumePod assumes a pod scheduled and aggregates the pod's information into its node. - // The implementation also decides the policy to expire pod before being confirmed (receiving Add event). - // After expiration, its information would be subtracted. 
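A table-driven reading of the state diagram above; the transitions are inferred from the surrounding comments and the real cache never materializes states this way, so treat it as a sketch:

    package cacheexample

    type podCacheState string

    const (
    	initial podCacheState = "Initial"
    	assumed podCacheState = "Assumed"
    	added   podCacheState = "Added"
    	expired podCacheState = "Expired"
    	deleted podCacheState = "Deleted"
    )

    // transitions[event][from] = to. Expired and Deleted are end states,
    // except that an Add event can bring an Expired pod back.
    var transitions = map[string]map[podCacheState]podCacheState{
    	"Assume": {initial: assumed},
    	"Add":    {initial: added, assumed: added, expired: added},
    	"Update": {added: added},
    	"Forget": {assumed: initial},
    	"Expire": {assumed: expired},
    	"Remove": {added: deleted},
    }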
- AssumePod(logger klog.Logger, pod *v1.Pod) error - - // FinishBinding signals that cache for assumed pod can be expired - FinishBinding(logger klog.Logger, pod *v1.Pod) error - - // ForgetPod removes an assumed pod from cache. - ForgetPod(logger klog.Logger, pod *v1.Pod) error - - // AddPod either confirms a pod if it's assumed, or adds it back if it's expired. - // If added back, the pod's information would be added again. - AddPod(logger klog.Logger, pod *v1.Pod) error - - // UpdatePod removes oldPod's information and adds newPod's information. - UpdatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error - - // RemovePod removes a pod. The pod's information would be subtracted from assigned node. - RemovePod(logger klog.Logger, pod *v1.Pod) error - - // GetPod returns the pod from the cache with the same namespace and the - // same name of the specified pod. - GetPod(pod *v1.Pod) (*v1.Pod, error) - - // IsAssumedPod returns true if the pod is assumed and not expired. - IsAssumedPod(pod *v1.Pod) (bool, error) - - // AddNode adds overall information about node. - // It returns a clone of added NodeInfo object. - AddNode(logger klog.Logger, node *v1.Node) *framework.NodeInfo - - // UpdateNode updates overall information about node. - // It returns a clone of updated NodeInfo object. - UpdateNode(logger klog.Logger, oldNode, newNode *v1.Node) *framework.NodeInfo - - // RemoveNode removes overall information about node. - RemoveNode(logger klog.Logger, node *v1.Node) error - - // UpdateSnapshot updates the passed infoSnapshot to the current contents of Cache. - // The node info contains aggregated information of pods scheduled (including assumed to be) - // on this node. - // The snapshot only includes Nodes that are not deleted at the time this function is called. - // nodeinfo.Node() is guaranteed to be not nil for all the nodes in the snapshot. - UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapshot) error - - // Dump produces a dump of the current cache. - Dump() *Dump -} - -// Dump is a dump of the cache state. -type Dump struct { - AssumedPods sets.Set[string] - Nodes map[string]*framework.NodeInfo -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/node_tree.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/node_tree.go deleted file mode 100644 index fe66ea7b207..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/node_tree.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "errors" - "fmt" - - v1 "k8s.io/api/core/v1" - utilnode "k8s.io/component-helpers/node/topology" - "k8s.io/klog/v2" -) - -// nodeTree is a tree-like data structure that holds node names in each zone. Zone names are -// keys to "NodeTree.tree" and values of "NodeTree.tree" are arrays of node names. -// NodeTree is NOT thread-safe, any concurrent updates/reads from it must be synchronized by the caller. -// It is used only by schedulerCache, and should stay as such. 
-type nodeTree struct { - tree map[string][]string // a map from zone (region-zone) to an array of nodes in the zone. - zones []string // a list of all the zones in the tree (keys) - numNodes int -} - -// newNodeTree creates a NodeTree from nodes. -func newNodeTree(logger klog.Logger, nodes []*v1.Node) *nodeTree { - nt := &nodeTree{ - tree: make(map[string][]string, len(nodes)), - } - for _, n := range nodes { - nt.addNode(logger, n) - } - return nt -} - -// addNode adds a node and its corresponding zone to the tree. If the zone already exists, the node -// is added to the array of nodes in that zone. -func (nt *nodeTree) addNode(logger klog.Logger, n *v1.Node) { - zone := utilnode.GetZoneKey(n) - if na, ok := nt.tree[zone]; ok { - for _, nodeName := range na { - if nodeName == n.Name { - logger.Info("Did not add to the NodeTree because it already exists", "node", klog.KObj(n)) - return - } - } - nt.tree[zone] = append(na, n.Name) - } else { - nt.zones = append(nt.zones, zone) - nt.tree[zone] = []string{n.Name} - } - logger.V(2).Info("Added node to NodeTree", "node", klog.KObj(n), "zone", zone) - nt.numNodes++ -} - -// removeNode removes a node from the NodeTree. -func (nt *nodeTree) removeNode(logger klog.Logger, n *v1.Node) error { - zone := utilnode.GetZoneKey(n) - if na, ok := nt.tree[zone]; ok { - for i, nodeName := range na { - if nodeName == n.Name { - nt.tree[zone] = append(na[:i], na[i+1:]...) - if len(nt.tree[zone]) == 0 { - nt.removeZone(zone) - } - logger.V(2).Info("Removed node from NodeTree", "node", klog.KObj(n), "zone", zone) - nt.numNodes-- - return nil - } - } - } - logger.Error(nil, "Did not remove Node in NodeTree because it was not found", "node", klog.KObj(n), "zone", zone) - return fmt.Errorf("node %q in group %q was not found", n.Name, zone) -} - -// removeZone removes a zone from tree. -// This function must be called while writer locks are hold. -func (nt *nodeTree) removeZone(zone string) { - delete(nt.tree, zone) - for i, z := range nt.zones { - if z == zone { - nt.zones = append(nt.zones[:i], nt.zones[i+1:]...) - return - } - } -} - -// updateNode updates a node in the NodeTree. -func (nt *nodeTree) updateNode(logger klog.Logger, old, new *v1.Node) { - var oldZone string - if old != nil { - oldZone = utilnode.GetZoneKey(old) - } - newZone := utilnode.GetZoneKey(new) - // If the zone ID of the node has not changed, we don't need to do anything. Name of the node - // cannot be changed in an update. - if oldZone == newZone { - return - } - nt.removeNode(logger, old) // No error checking. We ignore whether the old node exists or not. - nt.addNode(logger, new) -} - -// list returns the list of names of the node. NodeTree iterates over zones and in each zone iterates -// over nodes in a round robin fashion. -func (nt *nodeTree) list() ([]string, error) { - if len(nt.zones) == 0 { - return nil, nil - } - nodesList := make([]string, 0, nt.numNodes) - numExhaustedZones := 0 - nodeIndex := 0 - for len(nodesList) < nt.numNodes { - if numExhaustedZones >= len(nt.zones) { // all zones are exhausted. 
- return nodesList, errors.New("all zones exhausted before reaching count of nodes expected") - } - for zoneIndex := 0; zoneIndex < len(nt.zones); zoneIndex++ { - na := nt.tree[nt.zones[zoneIndex]] - if nodeIndex >= len(na) { // If the zone is exhausted, continue - if nodeIndex == len(na) { // If it is the first time the zone is exhausted - numExhaustedZones++ - } - continue - } - nodesList = append(nodesList, na[nodeIndex]) - } - nodeIndex++ - } - return nodesList, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/snapshot.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/snapshot.go deleted file mode 100644 index 164f1510c6d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/cache/snapshot.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a -// snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle. -type Snapshot struct { - // nodeInfoMap a map of node name to a snapshot of its NodeInfo. - nodeInfoMap map[string]*framework.NodeInfo - // nodeInfoList is the list of nodes as ordered in the cache's nodeTree. - nodeInfoList []*framework.NodeInfo - // havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms. - havePodsWithAffinityNodeInfoList []*framework.NodeInfo - // havePodsWithRequiredAntiAffinityNodeInfoList is the list of nodes with at least one pod declaring - // required anti-affinity terms. - havePodsWithRequiredAntiAffinityNodeInfoList []*framework.NodeInfo - // usedPVCSet contains a set of PVC names that have one or more scheduled pods using them, - // keyed in the format "namespace/name". - usedPVCSet sets.Set[string] - generation int64 -} - -var _ framework.SharedLister = &Snapshot{} - -// NewEmptySnapshot initializes a Snapshot struct and returns it. -func NewEmptySnapshot() *Snapshot { - return &Snapshot{ - nodeInfoMap: make(map[string]*framework.NodeInfo), - usedPVCSet: sets.New[string](), - } -} - -// NewSnapshot initializes a Snapshot struct and returns it. 
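nodeTree.list above flattens the zone map by taking the i-th node of every zone before any zone's i+1-th, so consumers see nodes interleaved across zones rather than one zone at a time. The interleaving on its own (hypothetical names):

    package cacheexample

    // roundRobin flattens zone -> nodes, one index at a time across zones.
    func roundRobin(zones []string, tree map[string][]string) []string {
    	total := 0
    	for _, z := range zones {
    		total += len(tree[z])
    	}
    	out := make([]string, 0, total)
    	for i := 0; len(out) < total; i++ {
    		for _, z := range zones {
    			if nodes := tree[z]; i < len(nodes) {
    				out = append(out, nodes[i])
    			}
    		}
    	}
    	return out
    }

For example, roundRobin([]string{"z1", "z2"}, map[string][]string{"z1": {"a", "b", "c"}, "z2": {"d"}}) returns [a d b c].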
-func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot { - nodeInfoMap := createNodeInfoMap(pods, nodes) - nodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap)) - havePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap)) - havePodsWithRequiredAntiAffinityNodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap)) - for _, v := range nodeInfoMap { - nodeInfoList = append(nodeInfoList, v) - if len(v.PodsWithAffinity) > 0 { - havePodsWithAffinityNodeInfoList = append(havePodsWithAffinityNodeInfoList, v) - } - if len(v.PodsWithRequiredAntiAffinity) > 0 { - havePodsWithRequiredAntiAffinityNodeInfoList = append(havePodsWithRequiredAntiAffinityNodeInfoList, v) - } - } - - s := NewEmptySnapshot() - s.nodeInfoMap = nodeInfoMap - s.nodeInfoList = nodeInfoList - s.havePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList - s.havePodsWithRequiredAntiAffinityNodeInfoList = havePodsWithRequiredAntiAffinityNodeInfoList - s.usedPVCSet = createUsedPVCSet(pods) - - return s -} - -// createNodeInfoMap obtains a list of pods and pivots that list into a map -// where the keys are node names and the values are the aggregated information -// for that node. -func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*framework.NodeInfo { - nodeNameToInfo := make(map[string]*framework.NodeInfo) - for _, pod := range pods { - nodeName := pod.Spec.NodeName - if _, ok := nodeNameToInfo[nodeName]; !ok { - nodeNameToInfo[nodeName] = framework.NewNodeInfo() - } - nodeNameToInfo[nodeName].AddPod(pod) - } - imageExistenceMap := createImageExistenceMap(nodes) - - for _, node := range nodes { - if _, ok := nodeNameToInfo[node.Name]; !ok { - nodeNameToInfo[node.Name] = framework.NewNodeInfo() - } - nodeInfo := nodeNameToInfo[node.Name] - nodeInfo.SetNode(node) - nodeInfo.ImageStates = getNodeImageStates(node, imageExistenceMap) - } - return nodeNameToInfo -} - -func createUsedPVCSet(pods []*v1.Pod) sets.Set[string] { - usedPVCSet := sets.New[string]() - for _, pod := range pods { - if pod.Spec.NodeName == "" { - continue - } - - for _, v := range pod.Spec.Volumes { - if v.PersistentVolumeClaim == nil { - continue - } - - key := framework.GetNamespacedName(pod.Namespace, v.PersistentVolumeClaim.ClaimName) - usedPVCSet.Insert(key) - } - } - return usedPVCSet -} - -// getNodeImageStates returns the given node's image states based on the given imageExistence map. -func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.Set[string]) map[string]*framework.ImageStateSummary { - imageStates := make(map[string]*framework.ImageStateSummary) - - for _, image := range node.Status.Images { - for _, name := range image.Names { - imageStates[name] = &framework.ImageStateSummary{ - Size: image.SizeBytes, - NumNodes: imageExistenceMap[name].Len(), - } - } - } - return imageStates -} - -// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names. -func createImageExistenceMap(nodes []*v1.Node) map[string]sets.Set[string] { - imageExistenceMap := make(map[string]sets.Set[string]) - for _, node := range nodes { - for _, image := range node.Status.Images { - for _, name := range image.Names { - if _, ok := imageExistenceMap[name]; !ok { - imageExistenceMap[name] = sets.New(node.Name) - } else { - imageExistenceMap[name].Insert(node.Name) - } - } - } - } - return imageExistenceMap -} - -// NodeInfos returns a NodeInfoLister. 
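createUsedPVCSet above applies two rules worth calling out: only pods already assigned to a node count (an unscheduled pod does not yet pin a PVC anywhere), and keys take the "namespace/name" form. The same filter in isolation (hypothetical pod type):

    package cacheexample

    import "fmt"

    type podRef struct {
    	namespace string
    	nodeName  string
    	pvcNames  []string
    }

    // usedPVCKeys returns "namespace/name" keys for every PVC referenced by
    // a pod that is already assigned to a node.
    func usedPVCKeys(pods []podRef) map[string]struct{} {
    	used := make(map[string]struct{})
    	for _, p := range pods {
    		if p.nodeName == "" {
    			continue
    		}
    		for _, claim := range p.pvcNames {
    			used[fmt.Sprintf("%s/%s", p.namespace, claim)] = struct{}{}
    		}
    	}
    	return used
    }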
-func (s *Snapshot) NodeInfos() framework.NodeInfoLister { - return s -} - -// StorageInfos returns a StorageInfoLister. -func (s *Snapshot) StorageInfos() framework.StorageInfoLister { - return s -} - -// NumNodes returns the number of nodes in the snapshot. -func (s *Snapshot) NumNodes() int { - return len(s.nodeInfoList) -} - -// List returns the list of nodes in the snapshot. -func (s *Snapshot) List() ([]*framework.NodeInfo, error) { - return s.nodeInfoList, nil -} - -// HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity -func (s *Snapshot) HavePodsWithAffinityList() ([]*framework.NodeInfo, error) { - return s.havePodsWithAffinityNodeInfoList, nil -} - -// HavePodsWithRequiredAntiAffinityList returns the list of nodes with at least one pod with -// required inter-pod anti-affinity -func (s *Snapshot) HavePodsWithRequiredAntiAffinityList() ([]*framework.NodeInfo, error) { - return s.havePodsWithRequiredAntiAffinityNodeInfoList, nil -} - -// Get returns the NodeInfo of the given node name. -func (s *Snapshot) Get(nodeName string) (*framework.NodeInfo, error) { - if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil { - return v, nil - } - return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName) -} - -func (s *Snapshot) IsPVCUsedByPods(key string) bool { - return s.usedPVCSet.Has(key) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/heap/heap.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/heap/heap.go deleted file mode 100644 index 182954e2300..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/heap/heap.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Below is the implementation of the a heap. The logic is pretty much the same -// as cache.heap, however, this heap does not perform synchronization. It leaves -// synchronization to the SchedulingQueue. - -package heap - -import ( - "container/heap" - "fmt" - - "k8s.io/kubernetes/pkg/scheduler/metrics" -) - -// KeyFunc is a function type to get the key from an object. -type KeyFunc[T any] func(obj T) string - -type heapItem[T any] struct { - obj T // The object which is stored in the heap. - index int // The index of the object's key in the Heap.queue. -} - -type itemKeyValue[T any] struct { - key string - obj T -} - -// data is an internal struct that implements the standard heap interface -// and keeps the data stored in the heap. -type data[T any] struct { - // items is a map from key of the objects to the objects and their index. - // We depend on the property that items in the map are in the queue and vice versa. - items map[string]*heapItem[T] - // queue implements a heap data structure and keeps the order of elements - // according to the heap invariant. The queue keeps the keys of objects stored - // in "items". - queue []string - - // keyFunc is used to make the key used for queued item insertion and retrieval, and - // should be deterministic. 
- keyFunc KeyFunc[T] - // lessFunc is used to compare two objects in the heap. - lessFunc LessFunc[T] -} - -var ( - _ = heap.Interface(&data[any]{}) // heapData is a standard heap -) - -// Less compares two objects and returns true if the first one should go -// in front of the second one in the heap. -func (h *data[T]) Less(i, j int) bool { - if i > len(h.queue) || j > len(h.queue) { - return false - } - itemi, ok := h.items[h.queue[i]] - if !ok { - return false - } - itemj, ok := h.items[h.queue[j]] - if !ok { - return false - } - return h.lessFunc(itemi.obj, itemj.obj) -} - -// Len returns the number of items in the Heap. -func (h *data[T]) Len() int { return len(h.queue) } - -// Swap implements swapping of two elements in the heap. This is a part of standard -// heap interface and should never be called directly. -func (h *data[T]) Swap(i, j int) { - if i < 0 || j < 0 { - return - } - h.queue[i], h.queue[j] = h.queue[j], h.queue[i] - item := h.items[h.queue[i]] - item.index = i - item = h.items[h.queue[j]] - item.index = j -} - -// Push is supposed to be called by container/heap.Push only. -func (h *data[T]) Push(kv interface{}) { - keyValue := kv.(*itemKeyValue[T]) - n := len(h.queue) - h.items[keyValue.key] = &heapItem[T]{keyValue.obj, n} - h.queue = append(h.queue, keyValue.key) -} - -// Pop is supposed to be called by container/heap.Pop only. -func (h *data[T]) Pop() interface{} { - if len(h.queue) == 0 { - return nil - } - key := h.queue[len(h.queue)-1] - h.queue = h.queue[0 : len(h.queue)-1] - item, ok := h.items[key] - if !ok { - // This is an error - return nil - } - delete(h.items, key) - return item.obj -} - -// Peek returns the head of the heap without removing it. -func (h *data[T]) Peek() (T, bool) { - if len(h.queue) > 0 { - return h.items[h.queue[0]].obj, true - } - var zero T - return zero, false -} - -// Heap is a producer/consumer queue that implements a heap data structure. -// It can be used to implement priority queues and similar data structures. -type Heap[T any] struct { - // data stores objects and has a queue that keeps their ordering according - // to the heap invariant. - data *data[T] - // metricRecorder updates the counter when elements of a heap get added or - // removed, and it does nothing if it's nil - metricRecorder metrics.MetricRecorder -} - -// AddOrUpdate inserts an item, and puts it in the queue. The item is updated if it -// already exists. -func (h *Heap[T]) AddOrUpdate(obj T) { - key := h.data.keyFunc(obj) - if _, exists := h.data.items[key]; exists { - h.data.items[key].obj = obj - heap.Fix(h.data, h.data.items[key].index) - } else { - heap.Push(h.data, &itemKeyValue[T]{key, obj}) - if h.metricRecorder != nil { - h.metricRecorder.Inc() - } - } -} - -// Delete removes an item. -func (h *Heap[T]) Delete(obj T) error { - key := h.data.keyFunc(obj) - if item, ok := h.data.items[key]; ok { - heap.Remove(h.data, item.index) - if h.metricRecorder != nil { - h.metricRecorder.Dec() - } - return nil - } - return fmt.Errorf("object not found") -} - -// Peek returns the head of the heap without removing it. -func (h *Heap[T]) Peek() (T, bool) { - return h.data.Peek() -} - -// Pop returns the head of the heap and removes it. -func (h *Heap[T]) Pop() (T, error) { - obj := heap.Pop(h.data) - if obj != nil { - if h.metricRecorder != nil { - h.metricRecorder.Dec() - } - return obj.(T), nil - } - var zero T - return zero, fmt.Errorf("heap is empty") -} - -// Get returns the requested item, or sets exists=false. 
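A usage sketch of the generic keyed heap defined in this file, assuming the New/AddOrUpdate/Pop API shown here (the import path is the package this hunk removes from the e2e vendor tree; upstream Kubernetes still ships it):

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/scheduler/backend/heap"
    )

    type task struct {
    	name     string
    	priority int
    }

    func main() {
    	h := heap.New(
    		func(t *task) string { return t.name },                   // key: one entry per name
    		func(a, b *task) bool { return a.priority > b.priority }, // higher priority pops first
    	)
    	h.AddOrUpdate(&task{name: "compact", priority: 1})
    	h.AddOrUpdate(&task{name: "rebalance", priority: 5})
    	h.AddOrUpdate(&task{name: "compact", priority: 9}) // same key: updated in place, heap re-fixed
    	for h.Len() > 0 {
    		t, _ := h.Pop()
    		fmt.Println(t.name, t.priority) // compact 9, then rebalance 5
    	}
    }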
-func (h *Heap[T]) Get(obj T) (T, bool) { - key := h.data.keyFunc(obj) - return h.GetByKey(key) -} - -// GetByKey returns the requested item, or sets exists=false. -func (h *Heap[T]) GetByKey(key string) (T, bool) { - item, exists := h.data.items[key] - if !exists { - var zero T - return zero, false - } - return item.obj, true -} - -func (h *Heap[T]) Has(obj T) bool { - key := h.data.keyFunc(obj) - _, ok := h.GetByKey(key) - return ok -} - -// List returns a list of all the items. -func (h *Heap[T]) List() []T { - list := make([]T, 0, len(h.data.items)) - for _, item := range h.data.items { - list = append(list, item.obj) - } - return list -} - -// Len returns the number of items in the heap. -func (h *Heap[T]) Len() int { - return len(h.data.queue) -} - -// New returns a Heap which can be used to queue up items to process. -func New[T any](keyFn KeyFunc[T], lessFn LessFunc[T]) *Heap[T] { - return NewWithRecorder(keyFn, lessFn, nil) -} - -// NewWithRecorder wraps an optional metricRecorder to compose a Heap object. -func NewWithRecorder[T any](keyFn KeyFunc[T], lessFn LessFunc[T], metricRecorder metrics.MetricRecorder) *Heap[T] { - return &Heap[T]{ - data: &data[T]{ - items: map[string]*heapItem[T]{}, - queue: []string{}, - keyFunc: keyFn, - lessFunc: lessFn, - }, - metricRecorder: metricRecorder, - } -} - -// LessFunc is a function that receives two items and returns true if the first -// item should be placed before the second one when the list is sorted. -type LessFunc[T any] func(item1, item2 T) bool diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/active_queue.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/active_queue.go deleted file mode 100644 index 14c734d7561..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/active_queue.go +++ /dev/null @@ -1,498 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package queue - -import ( - "container/list" - "fmt" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/backend/heap" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/metrics" -) - -// activeQueuer is a wrapper for activeQ related operations. -// Its methods, except "unlocked" ones, take the lock inside. -// Note: be careful when using unlocked() methods. -// getLock() methods should be used only for unlocked() methods -// and it is forbidden to call any other activeQueuer's method under this lock. 
-type activeQueuer interface { - underLock(func(unlockedActiveQ unlockedActiveQueuer)) - underRLock(func(unlockedActiveQ unlockedActiveQueueReader)) - - update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo - delete(pInfo *framework.QueuedPodInfo) error - pop(logger klog.Logger) (*framework.QueuedPodInfo, error) - list() []*v1.Pod - len() int - has(pInfo *framework.QueuedPodInfo) bool - - listInFlightEvents() []interface{} - listInFlightPods() []*v1.Pod - clusterEventsForPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) ([]*clusterEvent, error) - addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []framework.ClusterEvent) bool - addEventIfAnyInFlight(oldObj, newObj interface{}, event framework.ClusterEvent) bool - - schedulingCycle() int64 - done(pod types.UID) - close() - broadcast() -} - -// unlockedActiveQueuer defines activeQ methods that are not protected by the lock itself. -// underLock() method should be used to protect these methods. -type unlockedActiveQueuer interface { - unlockedActiveQueueReader - // add adds a new pod to the activeQ. - // The event should show which event triggered this addition and is used for the metric recording. - // This method should be called in activeQueue.underLock(). - add(pInfo *framework.QueuedPodInfo, event string) -} - -// unlockedActiveQueueReader defines activeQ read-only methods that are not protected by the lock itself. -// underLock() or underRLock() method should be used to protect these methods. -type unlockedActiveQueueReader interface { - // get returns the pod matching pInfo inside the activeQ. - // Returns false if the pInfo doesn't exist in the queue. - // This method should be called in activeQueue.underLock() or activeQueue.underRLock(). - get(pInfo *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool) - // has returns if pInfo exists in the queue. - // This method should be called in activeQueue.underLock() or activeQueue.underRLock(). - has(pInfo *framework.QueuedPodInfo) bool -} - -// unlockedActiveQueue defines activeQ methods that are not protected by the lock itself. -// activeQueue.underLock() or activeQueue.underRLock() method should be used to protect these methods. -type unlockedActiveQueue struct { - queue *heap.Heap[*framework.QueuedPodInfo] -} - -func newUnlockedActiveQueue(queue *heap.Heap[*framework.QueuedPodInfo]) *unlockedActiveQueue { - return &unlockedActiveQueue{ - queue: queue, - } -} - -// add adds a new pod to the activeQ. -// The event should show which event triggered this addition and is used for the metric recording. -// This method should be called in activeQueue.underLock(). -func (uaq *unlockedActiveQueue) add(pInfo *framework.QueuedPodInfo, event string) { - uaq.queue.AddOrUpdate(pInfo) - metrics.SchedulerQueueIncomingPods.WithLabelValues("active", event).Inc() -} - -// get returns the pod matching pInfo inside the activeQ. -// Returns false if the pInfo doesn't exist in the queue. -// This method should be called in activeQueue.underLock() or activeQueue.underRLock(). -func (uaq *unlockedActiveQueue) get(pInfo *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool) { - return uaq.queue.Get(pInfo) -} - -// has returns if pInfo exists in the queue. -// This method should be called in activeQueue.underLock() or activeQueue.underRLock(). -func (uaq *unlockedActiveQueue) has(pInfo *framework.QueuedPodInfo) bool { - return uaq.queue.Has(pInfo) -} - -// backoffQPopper defines method that is used to pop from the backoffQ when the activeQ is empty. 
-type backoffQPopper interface { - // popBackoff pops the pInfo from the podBackoffQ. - popBackoff() (*framework.QueuedPodInfo, error) - // len returns length of the podBackoffQ queue. - lenBackoff() int -} - -// activeQueue implements activeQueuer. All of the fields have to be protected using the lock. -type activeQueue struct { - // lock synchronizes all operations related to activeQ. - // It protects activeQ, inFlightPods, inFlightEvents, schedulingCycle and closed fields. - // Caution: DO NOT take "SchedulingQueue.lock" after taking "lock". - // You should always take "SchedulingQueue.lock" first, otherwise the queue could end up in deadlock. - // "lock" should not be taken after taking "backoffQueue.lock" or "nominator.nLock". - // Correct locking order is: SchedulingQueue.lock > lock > backoffQueue.lock > nominator.nLock. - lock sync.RWMutex - - // activeQ is heap structure that scheduler actively looks at to find pods to - // schedule. Head of heap is the highest priority pod. - queue *heap.Heap[*framework.QueuedPodInfo] - - // unlockedQueue is a wrapper of queue providing methods that are not locked themselves - // and can be used in the underLock() or underRLock(). - unlockedQueue *unlockedActiveQueue - - // cond is a condition that is notified when the pod is added to activeQ. - // When SchedulerPopFromBackoffQ feature is enabled, - // condition is also notified when the pod is added to backoffQ. - // It is used with lock. - cond sync.Cond - - // inFlightPods holds the UID of all pods which have been popped out for which Done - // hasn't been called yet - in other words, all pods that are currently being - // processed (being scheduled, in permit, or in the binding cycle). - // - // The values in the map are the entry of each pod in the inFlightEvents list. - // The value of that entry is the *v1.Pod at the time that scheduling of that - // pod started, which can be useful for logging or debugging. - inFlightPods map[types.UID]*list.Element - - // inFlightEvents holds the events received by the scheduling queue - // (entry value is clusterEvent) together with in-flight pods (entry - // value is *v1.Pod). Entries get added at the end while the mutex is - // locked, so they get serialized. - // - // The pod entries are added in Pop and used to track which events - // occurred after the pod scheduling attempt for that pod started. - // They get removed when the scheduling attempt is done, at which - // point all events that occurred in the meantime are processed. - // - // After removal of a pod, events at the start of the list are no - // longer needed because all of the other in-flight pods started - // later. Those events can be removed. - inFlightEvents *list.List - - // schedCycle represents sequence number of scheduling cycle and is incremented - // when a pod is popped. - schedCycle int64 - - // closed indicates that the queue is closed. - // It is mainly used to let Pop() exit its control loop while waiting for an item. - closed bool - - // isSchedulingQueueHintEnabled indicates whether the feature gate for the scheduling queue is enabled. - isSchedulingQueueHintEnabled bool - - metricsRecorder metrics.MetricAsyncRecorder - - // backoffQPopper is used to pop from backoffQ when activeQ is empty. - // It is non-nil only when SchedulerPopFromBackoffQ feature is enabled. 
-	backoffQPopper backoffQPopper
-}
-
-func newActiveQueue(queue *heap.Heap[*framework.QueuedPodInfo], isSchedulingQueueHintEnabled bool, metricRecorder metrics.MetricAsyncRecorder, backoffQPopper backoffQPopper) *activeQueue {
-	aq := &activeQueue{
-		queue:                        queue,
-		inFlightPods:                 make(map[types.UID]*list.Element),
-		inFlightEvents:               list.New(),
-		isSchedulingQueueHintEnabled: isSchedulingQueueHintEnabled,
-		metricsRecorder:              metricRecorder,
-		unlockedQueue:                newUnlockedActiveQueue(queue),
-		backoffQPopper:               backoffQPopper,
-	}
-	aq.cond.L = &aq.lock
-
-	return aq
-}
-
-// underLock runs the fn function under the lock.Lock.
-// fn can run unlockedActiveQueuer methods but should NOT run any other activeQueue method,
-// as it would end up in deadlock.
-func (aq *activeQueue) underLock(fn func(unlockedActiveQ unlockedActiveQueuer)) {
-	aq.lock.Lock()
-	defer aq.lock.Unlock()
-	fn(aq.unlockedQueue)
-}
-
-// underRLock runs the fn function under the lock.RLock.
-// fn can run unlockedActiveQueueReader methods but should NOT run any other activeQueue method,
-// as it would end up in deadlock.
-func (aq *activeQueue) underRLock(fn func(unlockedActiveQ unlockedActiveQueueReader)) {
-	aq.lock.RLock()
-	defer aq.lock.RUnlock()
-	fn(aq.unlockedQueue)
-}
-
-// update updates the pod in activeQ if oldPodInfo is already in the queue.
-// It returns new pod info if updated, nil otherwise.
-func (aq *activeQueue) update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo {
-	aq.lock.Lock()
-	defer aq.lock.Unlock()
-
-	if pInfo, exists := aq.queue.Get(oldPodInfo); exists {
-		_ = pInfo.Update(newPod)
-		aq.queue.AddOrUpdate(pInfo)
-		return pInfo
-	}
-	return nil
-}
-
-// delete deletes the pod info from activeQ.
-func (aq *activeQueue) delete(pInfo *framework.QueuedPodInfo) error {
-	aq.lock.Lock()
-	defer aq.lock.Unlock()
-
-	return aq.queue.Delete(pInfo)
-}
-
-// pop removes the head of the queue and returns it.
-// It blocks if the queue is empty and waits until a new item is added to the queue.
-// It increments the scheduling cycle when a pod is popped.
-func (aq *activeQueue) pop(logger klog.Logger) (*framework.QueuedPodInfo, error) {
-	aq.lock.Lock()
-	defer aq.lock.Unlock()
-
-	return aq.unlockedPop(logger)
-}
-
-func (aq *activeQueue) unlockedPop(logger klog.Logger) (*framework.QueuedPodInfo, error) {
-	var pInfo *framework.QueuedPodInfo
-	for aq.queue.Len() == 0 {
-		// backoffQPopper is non-nil only if SchedulerPopFromBackoffQ feature is enabled.
-		// In case of non-empty backoffQ, try popping from there.
-		if aq.backoffQPopper != nil && aq.backoffQPopper.lenBackoff() != 0 {
-			break
-		}
-		// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
-		// When Close() is called, p.closed is set and the condition is broadcast,
-		// which causes this loop to continue and return from the Pop().
-		if aq.closed {
-			logger.V(2).Info("Scheduling queue is closed")
-			return nil, nil
-		}
-		aq.cond.Wait()
-	}
-	pInfo, err := aq.queue.Pop()
-	if err != nil {
-		if aq.backoffQPopper == nil {
-			return nil, err
-		}
-		// Try to pop from backoffQ when activeQ is empty.
-		pInfo, err = aq.backoffQPopper.popBackoff()
-		if err != nil {
-			return nil, err
-		}
-		metrics.SchedulerQueueIncomingPods.WithLabelValues("active", framework.PopFromBackoffQ).Inc()
-	}
-	pInfo.Attempts++
-	pInfo.BackoffExpiration = time.Time{}
-	// In flight, no concurrent events yet.
- if aq.isSchedulingQueueHintEnabled { - // If the pod is already in the map, we shouldn't overwrite the inFlightPods otherwise it'd lead to a memory leak. - // https://github.com/kubernetes/kubernetes/pull/127016 - if _, ok := aq.inFlightPods[pInfo.Pod.UID]; ok { - // Just report it as an error, but no need to stop the scheduler - // because it likely doesn't cause any visible issues from the scheduling perspective. - logger.Error(nil, "the same pod is tracked in multiple places in the scheduler, and just discard it", "pod", klog.KObj(pInfo.Pod)) - // Just ignore/discard this duplicated pod and try to pop the next one. - return aq.unlockedPop(logger) - } - - aq.metricsRecorder.ObserveInFlightEventsAsync(metrics.PodPoppedInFlightEvent, 1, false) - aq.inFlightPods[pInfo.Pod.UID] = aq.inFlightEvents.PushBack(pInfo.Pod) - } - aq.schedCycle++ - - // Update metrics and reset the set of unschedulable plugins for the next attempt. - for plugin := range pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) { - metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Dec() - } - pInfo.UnschedulablePlugins.Clear() - pInfo.PendingPlugins.Clear() - - return pInfo, nil -} - -// list returns all pods that are in the queue. -func (aq *activeQueue) list() []*v1.Pod { - aq.lock.RLock() - defer aq.lock.RUnlock() - var result []*v1.Pod - for _, pInfo := range aq.queue.List() { - result = append(result, pInfo.Pod) - } - return result -} - -// len returns length of the queue. -func (aq *activeQueue) len() int { - return aq.queue.Len() -} - -// has inform if pInfo exists in the queue. -func (aq *activeQueue) has(pInfo *framework.QueuedPodInfo) bool { - aq.lock.RLock() - defer aq.lock.RUnlock() - return aq.queue.Has(pInfo) -} - -// listInFlightEvents returns all inFlightEvents. -func (aq *activeQueue) listInFlightEvents() []interface{} { - aq.lock.RLock() - defer aq.lock.RUnlock() - var values []interface{} - for event := aq.inFlightEvents.Front(); event != nil; event = event.Next() { - values = append(values, event.Value) - } - return values -} - -// listInFlightPods returns all inFlightPods. -func (aq *activeQueue) listInFlightPods() []*v1.Pod { - aq.lock.RLock() - defer aq.lock.RUnlock() - var pods []*v1.Pod - for _, obj := range aq.inFlightPods { - pods = append(pods, obj.Value.(*v1.Pod)) - } - return pods -} - -// clusterEventsForPod gets all cluster events that have happened during pod for pInfo is being scheduled. -func (aq *activeQueue) clusterEventsForPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) ([]*clusterEvent, error) { - aq.lock.RLock() - defer aq.lock.RUnlock() - logger.V(5).Info("Checking events for in-flight pod", "pod", klog.KObj(pInfo.Pod), "unschedulablePlugins", pInfo.UnschedulablePlugins, "inFlightEventsSize", aq.inFlightEvents.Len(), "inFlightPodsSize", len(aq.inFlightPods)) - - // AddUnschedulableIfNotPresent is called with the Pod at the end of scheduling or binding. - // So, given pInfo should have been Pop()ed before, - // we can assume pInfo must be recorded in inFlightPods and thus inFlightEvents. - inFlightPod, ok := aq.inFlightPods[pInfo.Pod.UID] - if !ok { - return nil, fmt.Errorf("in flight Pod isn't found in the scheduling queue. If you see this error log, it's likely a bug in the scheduler") - } - - var events []*clusterEvent - for event := inFlightPod.Next(); event != nil; event = event.Next() { - e, ok := event.Value.(*clusterEvent) - if !ok { - // Must be another in-flight Pod (*v1.Pod). Can be ignored. 
- continue - } - events = append(events, e) - } - return events, nil -} - -// addEventsIfPodInFlight adds clusterEvent to inFlightEvents if the newPod is in inFlightPods. -// It returns true if pushed the event to the inFlightEvents. -func (aq *activeQueue) addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []framework.ClusterEvent) bool { - aq.lock.Lock() - defer aq.lock.Unlock() - - _, ok := aq.inFlightPods[newPod.UID] - if ok { - for _, event := range events { - aq.metricsRecorder.ObserveInFlightEventsAsync(event.Label(), 1, false) - aq.inFlightEvents.PushBack(&clusterEvent{ - event: event, - oldObj: oldPod, - newObj: newPod, - }) - } - } - return ok -} - -// addEventIfAnyInFlight adds clusterEvent to inFlightEvents if any pod is in inFlightPods. -// It returns true if pushed the event to the inFlightEvents. -func (aq *activeQueue) addEventIfAnyInFlight(oldObj, newObj interface{}, event framework.ClusterEvent) bool { - aq.lock.Lock() - defer aq.lock.Unlock() - - if len(aq.inFlightPods) != 0 { - aq.metricsRecorder.ObserveInFlightEventsAsync(event.Label(), 1, false) - aq.inFlightEvents.PushBack(&clusterEvent{ - event: event, - oldObj: oldObj, - newObj: newObj, - }) - return true - } - return false -} - -func (aq *activeQueue) schedulingCycle() int64 { - aq.lock.RLock() - defer aq.lock.RUnlock() - return aq.schedCycle -} - -// done must be called for pod returned by Pop. This allows the queue to -// keep track of which pods are currently being processed. -func (aq *activeQueue) done(pod types.UID) { - aq.lock.Lock() - defer aq.lock.Unlock() - - aq.unlockedDone(pod) -} - -// unlockedDone is used by the activeQueue internally and doesn't take the lock itself. -// It assumes the lock is already taken outside before the method is called. -func (aq *activeQueue) unlockedDone(pod types.UID) { - inFlightPod, ok := aq.inFlightPods[pod] - if !ok { - // This Pod is already done()ed. - return - } - delete(aq.inFlightPods, pod) - - // Remove the pod from the list. - aq.inFlightEvents.Remove(inFlightPod) - - aggrMetricsCounter := map[string]int{} - // Remove events which are only referred to by this Pod - // so that the inFlightEvents list doesn't grow infinitely. - // If the pod was at the head of the list, then all - // events between it and the next pod are no longer needed - // and can be removed. - for { - e := aq.inFlightEvents.Front() - if e == nil { - // Empty list. - break - } - ev, ok := e.Value.(*clusterEvent) - if !ok { - // A pod, must stop pruning. - break - } - aq.inFlightEvents.Remove(e) - aggrMetricsCounter[ev.event.Label()]-- - } - - for evLabel, count := range aggrMetricsCounter { - aq.metricsRecorder.ObserveInFlightEventsAsync(evLabel, float64(count), false) - } - - aq.metricsRecorder.ObserveInFlightEventsAsync(metrics.PodPoppedInFlightEvent, -1, - // If it's the last Pod in inFlightPods, we should force-flush the metrics. - // Otherwise, especially in small clusters, which don't get a new Pod frequently, - // the metrics might not be flushed for a long time. - len(aq.inFlightPods) == 0) -} - -// close closes the activeQueue. -func (aq *activeQueue) close() { - aq.lock.Lock() - defer aq.lock.Unlock() - // We should call done() for all in-flight pods to clean up the inFlightEvents metrics. - // It's safe even if the binding cycle running asynchronously calls done() afterwards - // done() will just be a no-op. 
-	for pod := range aq.inFlightPods {
-		aq.unlockedDone(pod)
-	}
-	aq.closed = true
-}
-
-// broadcast notifies the pop() operation that new pod(s) were added to the activeQueue.
-func (aq *activeQueue) broadcast() {
-	aq.cond.Broadcast()
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/backoff_queue.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/backoff_queue.go
deleted file mode 100644
index 6e0bbd6808c..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/backoff_queue.go
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
-Copyright 2025 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package queue
-
-import (
-	"sync"
-	"time"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/backend/heap"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/metrics"
-	"k8s.io/utils/clock"
-)
-
-// backoffQOrderingWindowDuration is the duration of an ordering window in the podBackoffQ.
-// In each window, represented as a whole second, pods are ordered by priority.
-// It is the same as the interval of flushing the pods from the podBackoffQ to the activeQ, to flush the whole windows there.
-// This works only if the PopFromBackoffQ feature is enabled.
-// See KEP-5142 (http://kep.k8s.io/5142) for the rationale.
-const backoffQOrderingWindowDuration = time.Second
-
-// backoffQueuer is a wrapper for backoffQ related operations.
-// Its methods that rely on the queues take the lock inside.
-type backoffQueuer interface {
-	// isPodBackingoff returns true if a pod is still waiting for its backoff timer.
-	// If this returns true, the pod should not be re-tried.
-	// If the pod backoff time is in the actual ordering window, it should still be backing off.
-	isPodBackingoff(podInfo *framework.QueuedPodInfo) bool
-	// popAllBackoffCompleted pops all pods from podBackoffQ and podErrorBackoffQ that completed backoff.
-	popAllBackoffCompleted(logger klog.Logger) []*framework.QueuedPodInfo
-
-	// podInitialBackoffDuration returns initial backoff duration that pod can get.
-	podInitialBackoffDuration() time.Duration
-	// podMaxBackoffDuration returns maximum backoff duration that pod can get.
-	podMaxBackoffDuration() time.Duration
-	// waitUntilAlignedWithOrderingWindow waits until the time reaches a multiple of backoffQOrderingWindowDuration.
-	// It then runs the f function at the backoffQOrderingWindowDuration interval using a ticker.
-	// It's important to align the flushing time, because podBackoffQ's ordering is based on the windows
-	// and whole windows have to be flushed at one time without a visible latency.
-	waitUntilAlignedWithOrderingWindow(f func(), stopCh <-chan struct{})
-
-	// add adds the pInfo to backoffQueue.
-	// The event should show which event triggered this addition and is used for the metric recording.
-	// It also ensures that pInfo is not in both queues.
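// backoffQOrderingWindowDuration above is consumed by alignToWindow later in
// this file, which truncates timestamps to whole windows. A tiny sketch of the
// same alignment using time.Truncate; alignToWindowSketch is a hypothetical
// name, not the vendored function.

func alignToWindowSketch(t time.Time) time.Time {
	// Return the lowest possible timestamp inside t's one-second window.
	return t.Truncate(time.Second)
}

// Two timestamps that fall inside the same second align to the same window
// start, so ordering by window treats them as equal:
//
//	a := time.Date(2025, 1, 1, 0, 0, 1, 0, time.UTC)
//	b := a.Add(300 * time.Millisecond)
//	alignToWindowSketch(a).Equal(alignToWindowSketch(b)) // true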
- add(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string) - // update updates the pod in backoffQueue if oldPodInfo is already in the queue. - // It returns new pod info if updated, nil otherwise. - update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo - // delete deletes the pInfo from backoffQueue. - // It returns true if the pod was deleted. - delete(pInfo *framework.QueuedPodInfo) bool - // get returns the pInfo matching given pInfoLookup, if exists. - get(pInfoLookup *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool) - // has inform if pInfo exists in the queue. - has(pInfo *framework.QueuedPodInfo) bool - // list returns all pods that are in the queue. - list() []*v1.Pod - // len returns length of the queue. - len() int -} - -// backoffQueue implements backoffQueuer and wraps two queues inside, -// providing seamless access as if it were one queue. -type backoffQueue struct { - // lock synchronizes all operations related to backoffQ. - // It protects both podBackoffQ and podErrorBackoffQ. - // Caution: DO NOT take "SchedulingQueue.lock" or "activeQueue.lock" after taking "lock". - // You should always take "SchedulingQueue.lock" and "activeQueue.lock" first, otherwise the queue could end up in deadlock. - // "lock" should not be taken after taking "nominator.nLock". - // Correct locking order is: SchedulingQueue.lock > activeQueue.lock > lock > nominator.nLock. - lock sync.RWMutex - - clock clock.WithTicker - - // podBackoffQ is a heap ordered by backoff expiry. Pods which have completed backoff - // are popped from this heap before the scheduler looks at activeQ - podBackoffQ *heap.Heap[*framework.QueuedPodInfo] - // podErrorBackoffQ is a heap ordered by error backoff expiry. Pods which have completed backoff - // are popped from this heap before the scheduler looks at activeQ - podErrorBackoffQ *heap.Heap[*framework.QueuedPodInfo] - - podInitialBackoff time.Duration - podMaxBackoff time.Duration - // activeQLessFn is used as an eventual less function if two backoff times are equal, - // when the SchedulerPopFromBackoffQ feature is enabled. - activeQLessFn framework.LessFunc - - // isPopFromBackoffQEnabled indicates whether the feature gate SchedulerPopFromBackoffQ is enabled. - isPopFromBackoffQEnabled bool -} - -func newBackoffQueue(clock clock.WithTicker, podInitialBackoffDuration time.Duration, podMaxBackoffDuration time.Duration, activeQLessFn framework.LessFunc, popFromBackoffQEnabled bool) *backoffQueue { - bq := &backoffQueue{ - clock: clock, - podInitialBackoff: podInitialBackoffDuration, - podMaxBackoff: podMaxBackoffDuration, - isPopFromBackoffQEnabled: popFromBackoffQEnabled, - activeQLessFn: activeQLessFn, - } - podBackoffQLessFn := bq.lessBackoffCompleted - if popFromBackoffQEnabled { - podBackoffQLessFn = bq.lessBackoffCompletedWithPriority - } - bq.podBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, podBackoffQLessFn, metrics.NewBackoffPodsRecorder()) - bq.podErrorBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, bq.lessBackoffCompleted, metrics.NewBackoffPodsRecorder()) - - return bq -} - -// podInitialBackoffDuration returns initial backoff duration that pod can get. -func (bq *backoffQueue) podInitialBackoffDuration() time.Duration { - return bq.podInitialBackoff -} - -// podMaxBackoffDuration returns maximum backoff duration that pod can get. 
-func (bq *backoffQueue) podMaxBackoffDuration() time.Duration { - return bq.podMaxBackoff -} - -// alignToWindow truncates the provided time to the podBackoffQ ordering window. -// It returns the lowest possible timestamp in the window. -func (bq *backoffQueue) alignToWindow(t time.Time) time.Time { - if !bq.isPopFromBackoffQEnabled { - return t - } - return t.Truncate(backoffQOrderingWindowDuration) -} - -// waitUntilAlignedWithOrderingWindow waits until the time reaches a multiple of backoffQOrderingWindowDuration. -// It then runs the f function at the backoffQOrderingWindowDuration interval using a ticker. -// It's important to align the flushing time, because podBackoffQ's ordering is based on the windows -// and whole windows have to be flushed at one time without a visible latency. -func (bq *backoffQueue) waitUntilAlignedWithOrderingWindow(f func(), stopCh <-chan struct{}) { - now := bq.clock.Now() - // Wait until the time reaches the multiple of backoffQOrderingWindowDuration. - durationToNextWindow := bq.alignToWindow(now.Add(backoffQOrderingWindowDuration)).Sub(now) - timer := bq.clock.NewTimer(durationToNextWindow) - select { - case <-stopCh: - timer.Stop() - return - case <-timer.C(): - } - - // Run a ticker to make sure the invocations of f function - // are aligned with the backoffQ's ordering window. - ticker := bq.clock.NewTicker(backoffQOrderingWindowDuration) - for { - select { - case <-stopCh: - return - default: - } - - f() - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger ticker.C and stopCh, and ticker.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - ticker.Stop() - return - case <-ticker.C(): - } - } -} - -// lessBackoffCompletedWithPriority is a less function of podBackoffQ if PopFromBackoffQ feature is enabled. -// It orders the pods in the same BackoffOrderingWindow the same as the activeQ will do to improve popping order from backoffQ when activeQ is empty. -func (bq *backoffQueue) lessBackoffCompletedWithPriority(pInfo1, pInfo2 *framework.QueuedPodInfo) bool { - bo1 := bq.getBackoffTime(pInfo1) - bo2 := bq.getBackoffTime(pInfo2) - if !bo1.Equal(bo2) { - return bo1.Before(bo2) - } - // If the backoff time is the same, sort the pod in the same manner as activeQ does. - return bq.activeQLessFn(pInfo1, pInfo2) -} - -// lessBackoffCompleted is a less function of podErrorBackoffQ. -func (bq *backoffQueue) lessBackoffCompleted(pInfo1, pInfo2 *framework.QueuedPodInfo) bool { - bo1 := bq.getBackoffTime(pInfo1) - bo2 := bq.getBackoffTime(pInfo2) - return bo1.Before(bo2) -} - -// isPodBackingoff returns true if a pod is still waiting for its backoff timer. -// If this returns true, the pod should not be re-tried. -// If the pod backoff time is in the actual ordering window, it should still be backing off. -func (bq *backoffQueue) isPodBackingoff(podInfo *framework.QueuedPodInfo) bool { - boTime := bq.getBackoffTime(podInfo) - // Don't use After, because in case of windows equality we want to return true. - return !boTime.Before(bq.alignToWindow(bq.clock.Now())) -} - -// getBackoffTime returns the time that podInfo completes backoff. -// It caches the result in podInfo.BackoffExpiration and returns this value in subsequent calls. 
-// The cache will be cleared when this pod is popped from the scheduling queue again (i.e., at activeQ's pop),
-// because of the fact that the backoff time is calculated based on podInfo.Attempts,
-// which doesn't get changed until the pod's scheduling is retried.
-func (bq *backoffQueue) getBackoffTime(podInfo *framework.QueuedPodInfo) time.Time {
-	if podInfo.Attempts == 0 {
-		// Don't store the backoff expiration if the duration is 0,
-		// so that isPodBackingoff is handled correctly for a pod that should skip backoff because it wasn't tried at all.
-		return time.Time{}
-	}
-	if podInfo.BackoffExpiration.IsZero() {
-		duration := bq.calculateBackoffDuration(podInfo)
-		podInfo.BackoffExpiration = bq.alignToWindow(podInfo.Timestamp.Add(duration))
-	}
-	return podInfo.BackoffExpiration
-}
-
-// calculateBackoffDuration is a helper function for calculating the backoffDuration
-// based on the number of attempts the pod has made.
-func (bq *backoffQueue) calculateBackoffDuration(podInfo *framework.QueuedPodInfo) time.Duration {
-	if podInfo.Attempts == 0 {
-		// When the Pod hasn't experienced any scheduling attempts,
-		// it isn't obliged to get a backoff penalty at all.
-		return 0
-	}
-
-	duration := bq.podInitialBackoff
-	for i := 1; i < podInfo.Attempts; i++ {
-		// Use subtraction instead of addition or multiplication to avoid overflow.
-		if duration > bq.podMaxBackoff-duration {
-			return bq.podMaxBackoff
-		}
-		duration += duration
-	}
-	return duration
-}
-
-func (bq *backoffQueue) popAllBackoffCompletedWithQueue(logger klog.Logger, queue *heap.Heap[*framework.QueuedPodInfo]) []*framework.QueuedPodInfo {
-	var poppedPods []*framework.QueuedPodInfo
-	for {
-		pInfo, ok := queue.Peek()
-		if !ok || pInfo == nil {
-			break
-		}
-		pod := pInfo.Pod
-		if bq.isPodBackingoff(pInfo) {
-			break
-		}
-		_, err := queue.Pop()
-		if err != nil {
-			logger.Error(err, "Unable to pop pod from backoff queue despite backoff completion", "pod", klog.KObj(pod))
-			break
-		}
-		poppedPods = append(poppedPods, pInfo)
-	}
-	return poppedPods
-}
-
-// popAllBackoffCompleted pops all pods from podBackoffQ and podErrorBackoffQ that completed backoff.
-func (bq *backoffQueue) popAllBackoffCompleted(logger klog.Logger) []*framework.QueuedPodInfo {
-	bq.lock.Lock()
-	defer bq.lock.Unlock()
-
-	// Ensure both queues are processed.
-	return append(bq.popAllBackoffCompletedWithQueue(logger, bq.podBackoffQ), bq.popAllBackoffCompletedWithQueue(logger, bq.podErrorBackoffQ)...)
-}
-
-// add adds the pInfo to backoffQueue.
-// The event should show which event triggered this addition and is used for the metric recording.
-// It also ensures that pInfo is not in both queues.
-func (bq *backoffQueue) add(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string) {
-	bq.lock.Lock()
-	defer bq.lock.Unlock()
-
-	// If the pod has both its unschedulable plugins and pending plugins empty,
-	// it means that it failed because of an error and should be moved to podErrorBackoffQ.
-	if pInfo.UnschedulablePlugins.Len() == 0 && pInfo.PendingPlugins.Len() == 0 {
-		bq.podErrorBackoffQ.AddOrUpdate(pInfo)
-		// Ensure the pod is not in the podBackoffQ and report the error if it happens.
- err := bq.podBackoffQ.Delete(pInfo) - if err == nil { - logger.Error(nil, "BackoffQueue add() was called with a pod that was already in the podBackoffQ", "pod", klog.KObj(pInfo.Pod)) - return - } - metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", event).Inc() - return - } - bq.podBackoffQ.AddOrUpdate(pInfo) - // Ensure the pod is not in the podErrorBackoffQ and report the error if it happens. - err := bq.podErrorBackoffQ.Delete(pInfo) - if err == nil { - logger.Error(nil, "BackoffQueue add() was called with a pod that was already in the podErrorBackoffQ", "pod", klog.KObj(pInfo.Pod)) - return - } - metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", event).Inc() -} - -// update updates the pod in backoffQueue if oldPodInfo is already in the queue. -// It returns new pod info if updated, nil otherwise. -func (bq *backoffQueue) update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo { - bq.lock.Lock() - defer bq.lock.Unlock() - - // If the pod is in the backoff queue, update it there. - if pInfo, exists := bq.podBackoffQ.Get(oldPodInfo); exists { - _ = pInfo.Update(newPod) - bq.podBackoffQ.AddOrUpdate(pInfo) - return pInfo - } - // If the pod is in the error backoff queue, update it there. - if pInfo, exists := bq.podErrorBackoffQ.Get(oldPodInfo); exists { - _ = pInfo.Update(newPod) - bq.podErrorBackoffQ.AddOrUpdate(pInfo) - return pInfo - } - return nil -} - -// delete deletes the pInfo from backoffQueue. -// It returns true if the pod was deleted. -func (bq *backoffQueue) delete(pInfo *framework.QueuedPodInfo) bool { - bq.lock.Lock() - defer bq.lock.Unlock() - - if bq.podBackoffQ.Delete(pInfo) == nil { - return true - } - return bq.podErrorBackoffQ.Delete(pInfo) == nil -} - -// popBackoff pops the pInfo from the podBackoffQ. -// It returns error if the queue is empty. -// This doesn't pop the pods from the podErrorBackoffQ. -func (bq *backoffQueue) popBackoff() (*framework.QueuedPodInfo, error) { - bq.lock.Lock() - defer bq.lock.Unlock() - - return bq.podBackoffQ.Pop() -} - -// get returns the pInfo matching given pInfoLookup, if exists. -func (bq *backoffQueue) get(pInfoLookup *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool) { - bq.lock.RLock() - defer bq.lock.RUnlock() - - pInfo, exists := bq.podBackoffQ.Get(pInfoLookup) - if exists { - return pInfo, true - } - return bq.podErrorBackoffQ.Get(pInfoLookup) -} - -// has inform if pInfo exists in the queue. -func (bq *backoffQueue) has(pInfo *framework.QueuedPodInfo) bool { - bq.lock.RLock() - defer bq.lock.RUnlock() - - return bq.podBackoffQ.Has(pInfo) || bq.podErrorBackoffQ.Has(pInfo) -} - -// list returns all pods that are in the queue. -func (bq *backoffQueue) list() []*v1.Pod { - bq.lock.RLock() - defer bq.lock.RUnlock() - - var result []*v1.Pod - for _, pInfo := range bq.podBackoffQ.List() { - result = append(result, pInfo.Pod) - } - for _, pInfo := range bq.podErrorBackoffQ.List() { - result = append(result, pInfo.Pod) - } - return result -} - -// len returns length of the queue. -func (bq *backoffQueue) len() int { - bq.lock.RLock() - defer bq.lock.RUnlock() - - return bq.podBackoffQ.Len() + bq.podErrorBackoffQ.Len() -} - -// lenBackoff returns length of the podBackoffQ. 
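// calculateBackoffDuration earlier in this file doubles the duration once per
// prior attempt and uses "duration > max-duration" as an overflow-safe form of
// "2*duration > max". A standalone sketch of that technique; the function name
// is illustrative, not the vendored code.

func cappedExponentialBackoff(attempts int, initial, max time.Duration) time.Duration {
	if attempts == 0 {
		return 0 // a pod that was never attempted gets no penalty
	}
	d := initial
	for i := 1; i < attempts; i++ {
		if d > max-d { // doubling would overflow or exceed max
			return max
		}
		d += d
	}
	return d
}

// cappedExponentialBackoff(4, time.Second, 10*time.Second) yields 8s;
// one more attempt saturates at the 10s cap.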
-func (bq *backoffQueue) lenBackoff() int { - bq.lock.RLock() - defer bq.lock.RUnlock() - - return bq.podBackoffQ.Len() -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/nominator.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/nominator.go deleted file mode 100644 index 4d6ceaabcc0..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/nominator.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package queue - -import ( - "slices" - "sync" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - listersv1 "k8s.io/client-go/listers/core/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// nominator is a structure that stores pods nominated to run on nodes. -// It exists because nominatedNodeName of pod objects stored in the structure -// may be different than what scheduler has here. We should be able to find pods -// by their UID and update/delete them. -type nominator struct { - // nLock synchronizes all operations related to nominator. - // It should not be used anywhere else. - // Caution: DO NOT take ("SchedulingQueue.lock" or "activeQueue.lock" or "backoffQueue.lock") after taking "nLock". - // You should always take "SchedulingQueue.lock" and "activeQueue.lock" and "backoffQueue.lock" first, - // otherwise the nominator could end up in deadlock. - // Correct locking order is: SchedulingQueue.lock > activeQueue.lock = backoffQueue.lock > nLock. - nLock sync.RWMutex - - // podLister is used to verify if the given pod is alive. - podLister listersv1.PodLister - // nominatedPods is a map keyed by a node name and the value is a list of - // pods which are nominated to run on the node. These are pods which can be in - // the activeQ or unschedulablePods. - nominatedPods map[string][]podRef - // nominatedPodToNode is map keyed by a Pod UID to the node name where it is - // nominated. - nominatedPodToNode map[types.UID]string -} - -func newPodNominator(podLister listersv1.PodLister) *nominator { - return &nominator{ - podLister: podLister, - nominatedPods: make(map[string][]podRef), - nominatedPodToNode: make(map[types.UID]string), - } -} - -// AddNominatedPod adds a pod to the nominated pods of the given node. -// This is called during the preemption process after a node is nominated to run -// the pod. We update the structure before sending a request to update the pod -// object to avoid races with the following scheduling cycles. 
-func (npm *nominator) AddNominatedPod(logger klog.Logger, pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) {
-	npm.nLock.Lock()
-	npm.addNominatedPodUnlocked(logger, pi, nominatingInfo)
-	npm.nLock.Unlock()
-}
-
-func (npm *nominator) addNominatedPodUnlocked(logger klog.Logger, pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) {
-	// Always delete the pod if it already exists, to ensure we never store more than
-	// one instance of the pod.
-	npm.deleteUnlocked(pi.Pod)
-
-	var nodeName string
-	if nominatingInfo.Mode() == framework.ModeOverride {
-		nodeName = nominatingInfo.NominatedNodeName
-	} else if nominatingInfo.Mode() == framework.ModeNoop {
-		if pi.Pod.Status.NominatedNodeName == "" {
-			return
-		}
-		nodeName = pi.Pod.Status.NominatedNodeName
-	}
-
-	if npm.podLister != nil {
-		// If the pod was removed or if it was already scheduled, don't nominate it.
-		updatedPod, err := npm.podLister.Pods(pi.Pod.Namespace).Get(pi.Pod.Name)
-		if err != nil {
-			logger.V(4).Info("Pod doesn't exist in podLister, aborted adding it to the nominator", "pod", klog.KObj(pi.Pod))
-			return
-		}
-		if updatedPod.Spec.NodeName != "" {
-			logger.V(4).Info("Pod is already scheduled to a node, aborted adding it to the nominator", "pod", klog.KObj(pi.Pod), "node", updatedPod.Spec.NodeName)
-			return
-		}
-	}
-
-	npm.nominatedPodToNode[pi.Pod.UID] = nodeName
-	for _, np := range npm.nominatedPods[nodeName] {
-		if np.uid == pi.Pod.UID {
-			logger.V(4).Info("Pod already exists in the nominator", "pod", np.uid)
-			return
-		}
-	}
-	npm.nominatedPods[nodeName] = append(npm.nominatedPods[nodeName], podToRef(pi.Pod))
-}
-
-// UpdateNominatedPod updates the <oldPod> with <newPod>.
-func (npm *nominator) UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, newPodInfo *framework.PodInfo) {
-	npm.nLock.Lock()
-	defer npm.nLock.Unlock()
-	// In some cases, an Update event with no "NominatedNode" present is received right
-	// after a node("NominatedNode") is reserved for this pod in memory.
-	// In this case, we need to keep reserving the NominatedNode when updating the pod pointer.
-	var nominatingInfo *framework.NominatingInfo
-	// We won't fall into the `if` block below if the Update event represents:
-	// (1) NominatedNode info is added
-	// (2) NominatedNode info is updated
-	// (3) NominatedNode info is removed
-	if nominatedNodeName(oldPod) == "" && nominatedNodeName(newPodInfo.Pod) == "" {
-		if nnn, ok := npm.nominatedPodToNode[oldPod.UID]; ok {
-			// This is the only case we should continue reserving the NominatedNode
-			nominatingInfo = &framework.NominatingInfo{
-				NominatingMode:    framework.ModeOverride,
-				NominatedNodeName: nnn,
-			}
-		}
-	}
-	// We update irrespective of whether the nominatedNodeName changed or not, to ensure
-	// that pod pointer is updated.
-	npm.deleteUnlocked(oldPod)
-	npm.addNominatedPodUnlocked(logger, newPodInfo, nominatingInfo)
-}
-
-// DeleteNominatedPodIfExists deletes <pod> from nominatedPods.
-func (npm *nominator) DeleteNominatedPodIfExists(pod *v1.Pod) {
-	npm.nLock.Lock()
-	npm.deleteUnlocked(pod)
-	npm.nLock.Unlock()
-}
-
-func (npm *nominator) deleteUnlocked(p *v1.Pod) {
-	nnn, ok := npm.nominatedPodToNode[p.UID]
-	if !ok {
-		return
-	}
-	for i, np := range npm.nominatedPods[nnn] {
-		if np.uid == p.UID {
-			npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn][:i], npm.nominatedPods[nnn][i+1:]...)
- if len(npm.nominatedPods[nnn]) == 0 { - delete(npm.nominatedPods, nnn) - } - break - } - } - delete(npm.nominatedPodToNode, p.UID) -} - -func (npm *nominator) nominatedPodsForNode(nodeName string) []podRef { - npm.nLock.RLock() - defer npm.nLock.RUnlock() - return slices.Clone(npm.nominatedPods[nodeName]) -} - -// nominatedNodeName returns nominated node name of a Pod. -func nominatedNodeName(pod *v1.Pod) string { - return pod.Status.NominatedNodeName -} - -type podRef struct { - name string - namespace string - uid types.UID -} - -func podToRef(pod *v1.Pod) podRef { - return podRef{ - name: pod.Name, - namespace: pod.Namespace, - uid: pod.UID, - } -} - -func (np podRef) toPod() *v1.Pod { - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: np.name, - Namespace: np.namespace, - UID: np.uid, - }, - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/scheduling_queue.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/scheduling_queue.go deleted file mode 100644 index 69eae17fd16..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/scheduling_queue.go +++ /dev/null @@ -1,1401 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file contains structures that implement scheduling queue types. -// Scheduling queues hold pods waiting to be scheduled. This file implements a -// priority queue which has two sub queues and a additional data structure, -// namely: activeQ, backoffQ and unschedulablePods. -// - activeQ holds pods that are being considered for scheduling. -// - backoffQ holds pods that moved from unschedulablePods and will move to -// activeQ when their backoff periods complete. -// - unschedulablePods holds pods that were already attempted for scheduling and -// are currently determined to be unschedulable. - -package queue - -import ( - "context" - "fmt" - "math/rand" - "reflect" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" - listersv1 "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/backend/heap" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread" - "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/kubernetes/pkg/scheduler/util" - "k8s.io/utils/clock" -) - -const ( - // DefaultPodMaxInUnschedulablePodsDuration is the default value for the maximum - // time a pod can stay in unschedulablePods. If a pod stays in unschedulablePods - // for longer than this value, the pod will be moved from unschedulablePods to - // backoffQ or activeQ. 
If this value is empty, the default value (5min) - // will be used. - DefaultPodMaxInUnschedulablePodsDuration time.Duration = 5 * time.Minute - // Scheduling queue names - activeQ = "Active" - backoffQ = "Backoff" - unschedulablePods = "Unschedulable" - - preEnqueue = "PreEnqueue" -) - -const ( - // DefaultPodInitialBackoffDuration is the default value for the initial backoff duration - // for unschedulable pods. To change the default podInitialBackoffDurationSeconds used by the - // scheduler, update the ComponentConfig value in defaults.go - DefaultPodInitialBackoffDuration time.Duration = 1 * time.Second - // DefaultPodMaxBackoffDuration is the default value for the max backoff duration - // for unschedulable pods. To change the default podMaxBackoffDurationSeconds used by the - // scheduler, update the ComponentConfig value in defaults.go - DefaultPodMaxBackoffDuration time.Duration = 10 * time.Second -) - -// PreEnqueueCheck is a function type. It's used to build functions that -// run against a Pod and the caller can choose to enqueue or skip the Pod -// by the checking result. -type PreEnqueueCheck func(pod *v1.Pod) bool - -// SchedulingQueue is an interface for a queue to store pods waiting to be scheduled. -// The interface follows a pattern similar to cache.FIFO and cache.Heap and -// makes it easy to use those data structures as a SchedulingQueue. -type SchedulingQueue interface { - framework.PodNominator - Add(logger klog.Logger, pod *v1.Pod) - // Activate moves the given pods to activeQ. - // If a pod isn't found in unschedulablePods or backoffQ and it's in-flight, - // the wildcard event is registered so that the pod will be requeued when it comes back. - // But, if a pod isn't found in unschedulablePods or backoffQ and it's not in-flight (i.e., completely unknown pod), - // Activate would ignore the pod. - Activate(logger klog.Logger, pods map[string]*v1.Pod) - // AddUnschedulableIfNotPresent adds an unschedulable pod back to scheduling queue. - // The podSchedulingCycle represents the current scheduling cycle number which can be - // returned by calling SchedulingCycle(). - AddUnschedulableIfNotPresent(logger klog.Logger, pod *framework.QueuedPodInfo, podSchedulingCycle int64) error - // SchedulingCycle returns the current number of scheduling cycle which is - // cached by scheduling queue. Normally, incrementing this number whenever - // a pod is popped (e.g. called Pop()) is enough. - SchedulingCycle() int64 - // Pop removes the head of the queue and returns it. It blocks if the - // queue is empty and waits until a new item is added to the queue. - Pop(logger klog.Logger) (*framework.QueuedPodInfo, error) - // Done must be called for pod returned by Pop. This allows the queue to - // keep track of which pods are currently being processed. - Done(types.UID) - Update(logger klog.Logger, oldPod, newPod *v1.Pod) - Delete(pod *v1.Pod) - // Important Note: preCheck shouldn't include anything that depends on the in-tree plugins' logic. - // (e.g., filter Pods based on added/updated Node's capacity, etc.) - // We know currently some do, but we'll eventually remove them in favor of the scheduling queue hint. 
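// The Pop/Done contract documented above implies a consumer loop of the
// following shape on the caller's side. This is an illustrative sketch against
// the SchedulingQueue interface only; consumeQueue and the attempt callback
// are hypothetical names, not scheduler code.

func consumeQueue(logger klog.Logger, q SchedulingQueue, attempt func(*framework.QueuedPodInfo)) {
	for {
		pInfo, err := q.Pop(logger) // blocks until a pod arrives or the queue closes
		if err != nil || pInfo == nil {
			return
		}
		attempt(pInfo)        // run one scheduling attempt (placeholder)
		q.Done(pInfo.Pod.UID) // always release the in-flight tracking entry
	}
}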
- MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) - AssignedPodAdded(logger klog.Logger, pod *v1.Pod) - AssignedPodUpdated(logger klog.Logger, oldPod, newPod *v1.Pod, event framework.ClusterEvent) - - // Close closes the SchedulingQueue so that the goroutine which is - // waiting to pop items can exit gracefully. - Close() - // Run starts the goroutines managing the queue. - Run(logger klog.Logger) - - // The following functions are supposed to be used only for testing or debugging. - GetPod(name, namespace string) (*framework.QueuedPodInfo, bool) - PendingPods() ([]*v1.Pod, string) - InFlightPods() []*v1.Pod - PodsInActiveQ() []*v1.Pod - // PodsInBackoffQ returns all the Pods in the backoffQ. - PodsInBackoffQ() []*v1.Pod - UnschedulablePods() []*v1.Pod -} - -// NewSchedulingQueue initializes a priority queue as a new scheduling queue. -func NewSchedulingQueue( - lessFn framework.LessFunc, - informerFactory informers.SharedInformerFactory, - opts ...Option) SchedulingQueue { - return NewPriorityQueue(lessFn, informerFactory, opts...) -} - -// PriorityQueue implements a scheduling queue. -// The head of PriorityQueue is the highest priority pending pod. This structure -// has two sub queues and a additional data structure, namely: activeQ, -// backoffQ and unschedulablePods. -// - activeQ holds pods that are being considered for scheduling. -// - backoffQ holds pods that moved from unschedulablePods and will move to -// activeQ when their backoff periods complete. -// - unschedulablePods holds pods that were already attempted for scheduling and -// are currently determined to be unschedulable. -type PriorityQueue struct { - *nominator - - stop chan struct{} - clock clock.WithTicker - - // lock takes precedence and should be taken first, - // before any other locks in the queue (activeQueue.lock or backoffQueue.lock or nominator.nLock). - // Correct locking order is: lock > activeQueue.lock > backoffQueue.lock > nominator.nLock. - lock sync.RWMutex - - // the maximum time a pod can stay in the unschedulablePods. - podMaxInUnschedulablePodsDuration time.Duration - - activeQ activeQueuer - backoffQ backoffQueuer - // unschedulablePods holds pods that have been tried and determined unschedulable. - unschedulablePods *UnschedulablePods - // moveRequestCycle caches the sequence number of scheduling cycle when we - // received a move request. Unschedulable pods in and before this scheduling - // cycle will be put back to activeQueue if we were trying to schedule them - // when we received move request. - // TODO: this will be removed after SchedulingQueueHint goes to stable and the feature gate is removed. - moveRequestCycle int64 - - // preEnqueuePluginMap is keyed with profile name, valued with registered preEnqueue plugins. - preEnqueuePluginMap map[string][]framework.PreEnqueuePlugin - // queueingHintMap is keyed with profile name, valued with registered queueing hint functions. - queueingHintMap QueueingHintMapPerProfile - - nsLister listersv1.NamespaceLister - - metricsRecorder metrics.MetricAsyncRecorder - // pluginMetricsSamplePercent is the percentage of plugin metrics to be sampled. - pluginMetricsSamplePercent int - - // isSchedulingQueueHintEnabled indicates whether the feature gate for the scheduling queue is enabled. - isSchedulingQueueHintEnabled bool - // isPopFromBackoffQEnabled indicates whether the feature gate SchedulerPopFromBackoffQ is enabled. 
- isPopFromBackoffQEnabled bool -} - -// QueueingHintFunction is the wrapper of QueueingHintFn that has PluginName. -type QueueingHintFunction struct { - PluginName string - QueueingHintFn framework.QueueingHintFn -} - -// clusterEvent has the event and involved objects. -type clusterEvent struct { - event framework.ClusterEvent - // oldObj is the object that involved this event. - oldObj interface{} - // newObj is the object that involved this event. - newObj interface{} -} - -type priorityQueueOptions struct { - clock clock.WithTicker - podInitialBackoffDuration time.Duration - podMaxBackoffDuration time.Duration - podMaxInUnschedulablePodsDuration time.Duration - podLister listersv1.PodLister - metricsRecorder metrics.MetricAsyncRecorder - pluginMetricsSamplePercent int - preEnqueuePluginMap map[string][]framework.PreEnqueuePlugin - queueingHintMap QueueingHintMapPerProfile -} - -// Option configures a PriorityQueue -type Option func(*priorityQueueOptions) - -// WithClock sets clock for PriorityQueue, the default clock is clock.RealClock. -func WithClock(clock clock.WithTicker) Option { - return func(o *priorityQueueOptions) { - o.clock = clock - } -} - -// WithPodInitialBackoffDuration sets pod initial backoff duration for PriorityQueue. -func WithPodInitialBackoffDuration(duration time.Duration) Option { - return func(o *priorityQueueOptions) { - o.podInitialBackoffDuration = duration - } -} - -// WithPodMaxBackoffDuration sets pod max backoff duration for PriorityQueue. -func WithPodMaxBackoffDuration(duration time.Duration) Option { - return func(o *priorityQueueOptions) { - o.podMaxBackoffDuration = duration - } -} - -// WithPodLister sets pod lister for PriorityQueue. -func WithPodLister(pl listersv1.PodLister) Option { - return func(o *priorityQueueOptions) { - o.podLister = pl - } -} - -// WithPodMaxInUnschedulablePodsDuration sets podMaxInUnschedulablePodsDuration for PriorityQueue. -func WithPodMaxInUnschedulablePodsDuration(duration time.Duration) Option { - return func(o *priorityQueueOptions) { - o.podMaxInUnschedulablePodsDuration = duration - } -} - -// QueueingHintMapPerProfile is keyed with profile name, valued with queueing hint map registered for the profile. -type QueueingHintMapPerProfile map[string]QueueingHintMap - -// QueueingHintMap is keyed with ClusterEvent, valued with queueing hint functions registered for the event. -type QueueingHintMap map[framework.ClusterEvent][]*QueueingHintFunction - -// WithQueueingHintMapPerProfile sets queueingHintMap for PriorityQueue. -func WithQueueingHintMapPerProfile(m QueueingHintMapPerProfile) Option { - return func(o *priorityQueueOptions) { - o.queueingHintMap = m - } -} - -// WithPreEnqueuePluginMap sets preEnqueuePluginMap for PriorityQueue. -func WithPreEnqueuePluginMap(m map[string][]framework.PreEnqueuePlugin) Option { - return func(o *priorityQueueOptions) { - o.preEnqueuePluginMap = m - } -} - -// WithMetricsRecorder sets metrics recorder. -func WithMetricsRecorder(recorder metrics.MetricAsyncRecorder) Option { - return func(o *priorityQueueOptions) { - o.metricsRecorder = recorder - } -} - -// WithPluginMetricsSamplePercent sets the percentage of plugin metrics to be sampled. 
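// The Option helpers above implement Go's functional-options pattern: each
// With* function captures a single field override, and NewPriorityQueue
// (below) applies them over the package defaults. An illustrative composition;
// lessFn and informerFactory are assumed to exist in the caller's scope, and
// the durations are arbitrary examples rather than recommended values.
//
//	q := NewPriorityQueue(lessFn, informerFactory,
//		WithPodInitialBackoffDuration(2*time.Second),
//		WithPodMaxBackoffDuration(30*time.Second),
//		WithPodMaxInUnschedulablePodsDuration(10*time.Minute),
//	)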
-func WithPluginMetricsSamplePercent(percent int) Option { - return func(o *priorityQueueOptions) { - o.pluginMetricsSamplePercent = percent - } -} - -var defaultPriorityQueueOptions = priorityQueueOptions{ - clock: clock.RealClock{}, - podInitialBackoffDuration: DefaultPodInitialBackoffDuration, - podMaxBackoffDuration: DefaultPodMaxBackoffDuration, - podMaxInUnschedulablePodsDuration: DefaultPodMaxInUnschedulablePodsDuration, -} - -// Making sure that PriorityQueue implements SchedulingQueue. -var _ SchedulingQueue = &PriorityQueue{} - -// newQueuedPodInfoForLookup builds a QueuedPodInfo object for a lookup in the queue. -func newQueuedPodInfoForLookup(pod *v1.Pod, plugins ...string) *framework.QueuedPodInfo { - // Since this is only used for a lookup in the queue, we only need to set the Pod, - // and so we avoid creating a full PodInfo, which is expensive to instantiate frequently. - return &framework.QueuedPodInfo{ - PodInfo: &framework.PodInfo{Pod: pod}, - UnschedulablePlugins: sets.New(plugins...), - } -} - -// NewPriorityQueue creates a PriorityQueue object. -func NewPriorityQueue( - lessFn framework.LessFunc, - informerFactory informers.SharedInformerFactory, - opts ...Option, -) *PriorityQueue { - options := defaultPriorityQueueOptions - if options.podLister == nil { - options.podLister = informerFactory.Core().V1().Pods().Lister() - } - for _, opt := range opts { - opt(&options) - } - - isSchedulingQueueHintEnabled := utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) - isPopFromBackoffQEnabled := utilfeature.DefaultFeatureGate.Enabled(features.SchedulerPopFromBackoffQ) - - backoffQ := newBackoffQueue(options.clock, options.podInitialBackoffDuration, options.podMaxBackoffDuration, lessFn, isPopFromBackoffQEnabled) - pq := &PriorityQueue{ - clock: options.clock, - stop: make(chan struct{}), - podMaxInUnschedulablePodsDuration: options.podMaxInUnschedulablePodsDuration, - backoffQ: backoffQ, - unschedulablePods: newUnschedulablePods(metrics.NewUnschedulablePodsRecorder(), metrics.NewGatedPodsRecorder()), - preEnqueuePluginMap: options.preEnqueuePluginMap, - queueingHintMap: options.queueingHintMap, - metricsRecorder: options.metricsRecorder, - pluginMetricsSamplePercent: options.pluginMetricsSamplePercent, - moveRequestCycle: -1, - isSchedulingQueueHintEnabled: isSchedulingQueueHintEnabled, - isPopFromBackoffQEnabled: isPopFromBackoffQEnabled, - } - var backoffQPopper backoffQPopper - if isPopFromBackoffQEnabled { - backoffQPopper = backoffQ - } - pq.activeQ = newActiveQueue(heap.NewWithRecorder(podInfoKeyFunc, heap.LessFunc[*framework.QueuedPodInfo](lessFn), metrics.NewActivePodsRecorder()), isSchedulingQueueHintEnabled, options.metricsRecorder, backoffQPopper) - pq.nsLister = informerFactory.Core().V1().Namespaces().Lister() - pq.nominator = newPodNominator(options.podLister) - - return pq -} - -// Run starts the goroutine to pump from backoffQ to activeQ -func (p *PriorityQueue) Run(logger klog.Logger) { - go p.backoffQ.waitUntilAlignedWithOrderingWindow(func() { - p.flushBackoffQCompleted(logger) - }, p.stop) - go wait.Until(func() { - p.flushUnschedulablePodsLeftover(logger) - }, 30*time.Second, p.stop) -} - -// queueingStrategy indicates how the scheduling queue should enqueue the Pod from unschedulable pod pool. -type queueingStrategy int - -const ( - // queueSkip indicates that the scheduling queue should skip requeuing the Pod to activeQ/backoffQ. 
- queueSkip queueingStrategy = iota - // queueAfterBackoff indicates that the scheduling queue should requeue the Pod after backoff is completed. - queueAfterBackoff - // queueImmediately indicates that the scheduling queue should skip backoff and requeue the Pod immediately to activeQ. - queueImmediately -) - -// isEventOfInterest returns true if the event is of interest by some plugins. -func (p *PriorityQueue) isEventOfInterest(logger klog.Logger, event framework.ClusterEvent) bool { - if event.IsWildCard() { - // Wildcard event moves Pods that failed with any plugins. - return true - } - - for _, hintMap := range p.queueingHintMap { - for eventToMatch := range hintMap { - if eventToMatch.Match(event) { - // This event is interested by some plugins. - return true - } - } - } - - logger.V(6).Info("receive an event that isn't interested by any enabled plugins", "event", event) - - return false -} - -// isPodWorthRequeuing calls QueueingHintFn of only plugins registered in pInfo.unschedulablePlugins and pInfo.PendingPlugins. -// -// If any of pInfo.PendingPlugins return Queue, -// the scheduling queue is supposed to enqueue this Pod to activeQ, skipping backoffQ. -// If any of pInfo.unschedulablePlugins return Queue, -// the scheduling queue is supposed to enqueue this Pod to activeQ/backoffQ depending on the remaining backoff time of the Pod. -// If all QueueingHintFns returns Skip, the scheduling queue enqueues the Pod back to unschedulable Pod pool -// because no plugin changes the scheduling result via the event. -func (p *PriorityQueue) isPodWorthRequeuing(logger klog.Logger, pInfo *framework.QueuedPodInfo, event framework.ClusterEvent, oldObj, newObj interface{}) queueingStrategy { - rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) - if rejectorPlugins.Len() == 0 { - logger.V(6).Info("Worth requeuing because no failed plugins", "pod", klog.KObj(pInfo.Pod)) - return queueAfterBackoff - } - - if event.IsWildCard() { - // If the wildcard event has a Pod in newObj, - // that indicates that the event wants to be effective for the Pod only. - // Specifically, EventForceActivate could have a target Pod in newObj. - if newObj != nil { - if pod, ok := newObj.(*v1.Pod); !ok || pod.UID != pInfo.Pod.UID { - // This wildcard event is not for this Pod. - if ok { - logger.V(6).Info("Not worth requeuing because the event is wildcard, but for another pod", "pod", klog.KObj(pInfo.Pod), "event", event.Label(), "newObj", klog.KObj(pod)) - } - return queueSkip - } - } - - // If the wildcard event is special one as someone wants to force all Pods to move to activeQ/backoffQ. - // We return queueAfterBackoff in this case, while resetting all blocked plugins. - logger.V(6).Info("Worth requeuing because the event is wildcard", "pod", klog.KObj(pInfo.Pod), "event", event.Label()) - return queueAfterBackoff - } - - hintMap, ok := p.queueingHintMap[pInfo.Pod.Spec.SchedulerName] - if !ok { - // shouldn't reach here unless bug. - logger.Error(nil, "No QueueingHintMap is registered for this profile", "profile", pInfo.Pod.Spec.SchedulerName, "pod", klog.KObj(pInfo.Pod)) - return queueAfterBackoff - } - - pod := pInfo.Pod - queueStrategy := queueSkip - for eventToMatch, hintfns := range hintMap { - if !eventToMatch.Match(event) { - continue - } - - for _, hintfn := range hintfns { - if !rejectorPlugins.Has(hintfn.PluginName) { - // skip if it's not hintfn from rejectorPlugins. 
-				continue
-			}
-
-			start := time.Now()
-			hint, err := hintfn.QueueingHintFn(logger, pod, oldObj, newObj)
-			if err != nil {
-				// If the QueueingHintFn returned an error, we should treat the event as Queue so that we can prevent
-				// the Pod from being stuck in the unschedulable pod pool.
-				oldObjMeta, newObjMeta, asErr := util.As[klog.KMetadata](oldObj, newObj)
-				if asErr != nil {
-					logger.Error(err, "QueueingHintFn returns error", "event", event, "plugin", hintfn.PluginName, "pod", klog.KObj(pod))
-				} else {
-					logger.Error(err, "QueueingHintFn returns error", "event", event, "plugin", hintfn.PluginName, "pod", klog.KObj(pod), "oldObj", klog.KObj(oldObjMeta), "newObj", klog.KObj(newObjMeta))
-				}
-				hint = framework.Queue
-			}
-			p.metricsRecorder.ObserveQueueingHintDurationAsync(hintfn.PluginName, event.Label(), queueingHintToLabel(hint, err), metrics.SinceInSeconds(start))
-
-			if hint == framework.QueueSkip {
-				continue
-			}
-
-			if pInfo.PendingPlugins.Has(hintfn.PluginName) {
-				// Interpret Queue from a Pending plugin as queueImmediately.
-				// We can return immediately because queueImmediately is the highest priority.
-				return queueImmediately
-			}
-
-			// Interpret Queue from an unschedulable plugin as queueAfterBackoff.
-
-			if pInfo.PendingPlugins.Len() == 0 {
-				// We can return immediately because no Pending plugins (the only ones that can yield queueImmediately) are registered for this Pod,
-				// and queueAfterBackoff is the second highest priority.
-				return queueAfterBackoff
-			}
-
-			// We can't return immediately because there are some Pending plugins registered for this Pod.
-			// We need to check if those plugins return Queue or not and if they do, we return queueImmediately.
-			queueStrategy = queueAfterBackoff
-		}
-	}
-
-	return queueStrategy
-}
-
-// queueingHintToLabel converts a hint and an error from QHint to a label string.
-func queueingHintToLabel(hint framework.QueueingHint, err error) string {
-	if err != nil {
-		return metrics.QueueingHintResultError
-	}
-
-	switch hint {
-	case framework.Queue:
-		return metrics.QueueingHintResultQueue
-	case framework.QueueSkip:
-		return metrics.QueueingHintResultQueueSkip
-	}
-
-	// Shouldn't reach here.
-	return ""
-}
-
-// runPreEnqueuePlugins iterates over the PreEnqueue function of each registered PreEnqueuePlugin.
-// It returns true if all PreEnqueue functions run successfully; otherwise it returns false
-// upon the first failure.
-// Note: we need to associate the failed plugin with `pInfo`, so that the pod can be moved back
-// to activeQ by a related cluster event.
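
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// A minimal, standalone model of the hint evaluation above: a plugin-side
// QueueingHintFn inspects the incoming event and answers Queue or QueueSkip.
// Every name below is a simplified stand-in, not the real framework API.
package main

import "fmt"

type queueingHint int

const (
	hintQueueSkip queueingHint = iota
	hintQueue
)

// zoneHintFn assumes the pod was rejected because no node in its required
// zone existed; a node-add event in that zone is worth a requeue.
func zoneHintFn(podZone, addedNodeZone string) queueingHint {
	if podZone == addedNodeZone {
		return hintQueue // the event may resolve the earlier rejection
	}
	return hintQueueSkip // the event cannot help; stay in the unschedulable pool
}

func main() {
	fmt.Println(zoneHintFn("zone-a", "zone-a")) // 1 -> requeue
	fmt.Println(zoneHintFn("zone-a", "zone-b")) // 0 -> skip
}
// ---- end of sketch ----
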
-func (p *PriorityQueue) runPreEnqueuePlugins(ctx context.Context, pInfo *framework.QueuedPodInfo) bool { - logger := klog.FromContext(ctx) - var s *framework.Status - pod := pInfo.Pod - startTime := p.clock.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(preEnqueue, s.Code().String(), pod.Spec.SchedulerName).Observe(metrics.SinceInSeconds(startTime)) - }() - - shouldRecordMetric := rand.Intn(100) < p.pluginMetricsSamplePercent - for _, pl := range p.preEnqueuePluginMap[pod.Spec.SchedulerName] { - s = p.runPreEnqueuePlugin(ctx, pl, pod, shouldRecordMetric) - if s.IsSuccess() { - continue - } - pInfo.UnschedulablePlugins.Insert(pl.Name()) - metrics.UnschedulableReason(pl.Name(), pod.Spec.SchedulerName).Inc() - if s.Code() == framework.Error { - logger.Error(s.AsError(), "Unexpected error running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name()) - } else { - logger.V(4).Info("Status after running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", s) - } - return false - } - return true -} - -func (p *PriorityQueue) runPreEnqueuePlugin(ctx context.Context, pl framework.PreEnqueuePlugin, pod *v1.Pod, shouldRecordMetric bool) *framework.Status { - if !shouldRecordMetric { - return pl.PreEnqueue(ctx, pod) - } - startTime := p.clock.Now() - s := pl.PreEnqueue(ctx, pod) - p.metricsRecorder.ObservePluginDurationAsync(preEnqueue, pl.Name(), s.Code().String(), p.clock.Since(startTime).Seconds()) - return s -} - -// moveToActiveQ tries to add the pod to the active queue. -// If the pod doesn't pass PreEnqueue plugins, it gets added to unschedulablePods instead. -// It returns a boolean flag to indicate whether the pod is added successfully. -func (p *PriorityQueue) moveToActiveQ(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string) bool { - gatedBefore := pInfo.Gated - // If SchedulerPopFromBackoffQ feature gate is enabled, - // PreEnqueue plugins were called when the pod was added to the backoffQ. - // Don't need to repeat it here when the pod is directly moved from the backoffQ. - if !p.isPopFromBackoffQEnabled || event != framework.BackoffComplete { - pInfo.Gated = !p.runPreEnqueuePlugins(context.Background(), pInfo) - } - - added := false - p.activeQ.underLock(func(unlockedActiveQ unlockedActiveQueuer) { - if pInfo.Gated { - // Add the Pod to unschedulablePods if it's not passing PreEnqueuePlugins. - if unlockedActiveQ.has(pInfo) { - return - } - if p.backoffQ.has(pInfo) { - return - } - if p.unschedulablePods.get(pInfo.Pod) != nil { - return - } - p.unschedulablePods.addOrUpdate(pInfo, event) - logger.V(5).Info("Pod moved to an internal scheduling queue, because the pod is gated", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", unschedulablePods) - return - } - if pInfo.InitialAttemptTimestamp == nil { - now := p.clock.Now() - pInfo.InitialAttemptTimestamp = &now - } - - unlockedActiveQ.add(pInfo, event) - added = true - - p.unschedulablePods.delete(pInfo.Pod, gatedBefore) - p.backoffQ.delete(pInfo) - logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", activeQ) - if event == framework.EventUnscheduledPodAdd.Label() || event == framework.EventUnscheduledPodUpdate.Label() { - p.AddNominatedPod(logger, pInfo.PodInfo, nil) - } - }) - return added -} - -// moveToBackoffQ tries to add the pod to the backoff queue. 
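
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// How the PreEnqueue gating in moveToActiveQ above plays out, reduced to a
// standalone toy: a pod with outstanding scheduling gates is parked as gated
// instead of entering the active queue. Types are invented for the sketch.
package main

import "fmt"

type pod struct {
	name            string
	schedulingGates []string
}

// preEnqueue stands in for running all PreEnqueue plugins; here the only
// rule is that every scheduling gate must already be removed.
func preEnqueue(p pod) bool { return len(p.schedulingGates) == 0 }

func main() {
	for _, p := range []pod{
		{name: "ready"},
		{name: "blocked", schedulingGates: []string{"example.com/wait"}},
	} {
		if preEnqueue(p) {
			fmt.Println(p.name, "-> activeQ")
		} else {
			fmt.Println(p.name, "-> unschedulablePods (gated)")
		}
	}
}
// ---- end of sketch ----
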
-// If the SchedulerPopFromBackoffQ feature gate is enabled and the pod doesn't pass PreEnqueue plugins, it gets added to unschedulablePods instead.
-// It returns a boolean flag to indicate whether the pod is added successfully.
-func (p *PriorityQueue) moveToBackoffQ(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string) bool {
-	// If the SchedulerPopFromBackoffQ feature gate is enabled,
-	// PreEnqueue plugins are called when inserting pods into the backoffQ,
-	// so that they don't have to be called again when the pods are popped out.
-	if p.isPopFromBackoffQEnabled {
-		pInfo.Gated = !p.runPreEnqueuePlugins(context.Background(), pInfo)
-		if pInfo.Gated {
-			if p.unschedulablePods.get(pInfo.Pod) == nil {
-				p.unschedulablePods.addOrUpdate(pInfo, event)
-				logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", unschedulablePods)
-			}
-			return false
-		}
-	}
-	p.backoffQ.add(logger, pInfo, event)
-	logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", backoffQ)
-	return true
-}
-
-// Add adds a pod to the active queue. It should be called only when a new pod
-// is added so there is no chance the pod is already in the active/unschedulable/backoff queues.
-func (p *PriorityQueue) Add(logger klog.Logger, pod *v1.Pod) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	pInfo := p.newQueuedPodInfo(pod)
-	if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodAdd.Label()); added {
-		p.activeQ.broadcast()
-	}
-}
-
-// Activate moves the given pods to activeQ.
-// If a pod isn't found in unschedulablePods or backoffQ and it's in-flight,
-// the wildcard event is registered so that the pod will be requeued when it comes back.
-// But, if a pod isn't found in unschedulablePods or backoffQ and it's not in-flight (i.e., a completely unknown pod),
-// Activate would ignore the pod.
-func (p *PriorityQueue) Activate(logger klog.Logger, pods map[string]*v1.Pod) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	activated := false
-	for _, pod := range pods {
-		if p.activate(logger, pod) {
-			activated = true
-			continue
-		}
-
-		// If this pod is in-flight, register the activation event (when QHints are enabled) or update moveRequestCycle (when QHints are disabled)
-		// so that the pod will be requeued when it comes back.
-		// Specifically in the in-tree plugins, this is for the scenario with the preemption plugin
-		// where the async preemption API calls are all done or fail at some point before the Pod comes back to the queue.
-		p.activeQ.addEventsIfPodInFlight(nil, pod, []framework.ClusterEvent{framework.EventForceActivate})
-		p.moveRequestCycle = p.activeQ.schedulingCycle()
-	}
-
-	if activated {
-		p.activeQ.broadcast()
-	}
-}
-
-func (p *PriorityQueue) activate(logger klog.Logger, pod *v1.Pod) bool {
-	var pInfo *framework.QueuedPodInfo
-	// Verify if the pod is present in unschedulablePods or backoffQ.
-	if pInfo = p.unschedulablePods.get(pod); pInfo == nil {
-		// If the pod doesn't belong to unschedulablePods or backoffQ, don't activate it.
-		// The pod may already be in activeQ.
-		var exists bool
-		pInfo, exists = p.backoffQ.get(newQueuedPodInfoForLookup(pod))
-		if !exists {
-			return false
-		}
-		// Delete the pod from the backoffQ now to make sure it won't be popped from the backoffQ
-		// just before moving it to the activeQ.
-		if deleted := p.backoffQ.delete(pInfo); !deleted {
-			// Pod was popped from the backoffQ in the meantime. Don't activate it.
-			return false
-		}
-	}
-
-	if pInfo == nil {
-		// Redundant safety check. We shouldn't reach here.
-		logger.Error(nil, "Internal error: cannot obtain pInfo")
-		return false
-	}
-
-	return p.moveToActiveQ(logger, pInfo, framework.ForceActivate)
-}
-
-// SchedulingCycle returns the current scheduling cycle.
-func (p *PriorityQueue) SchedulingCycle() int64 {
-	return p.activeQ.schedulingCycle()
-}
-
-// determineSchedulingHintForInFlightPod looks at the unschedulable plugins of the given Pod
-// and determines the scheduling hint for this Pod while checking the events that happened while it was in-flight.
-func (p *PriorityQueue) determineSchedulingHintForInFlightPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) queueingStrategy {
-	if len(pInfo.UnschedulablePlugins) == 0 && len(pInfo.PendingPlugins) == 0 {
-		// No failed plugins are associated with this Pod.
-		// Meaning something unusual (a temporary failure on kube-apiserver, etc.) happened and this Pod gets moved back to the queue.
-		// In this case, we should retry scheduling it because this Pod may not be retried until the next flush.
-		return queueAfterBackoff
-	}
-
-	events, err := p.activeQ.clusterEventsForPod(logger, pInfo)
-	if err != nil {
-		logger.Error(err, "Error getting cluster events for pod", "pod", klog.KObj(pInfo.Pod))
-		return queueAfterBackoff
-	}
-
-	// Check if there is an event that makes this Pod schedulable based on pInfo.UnschedulablePlugins.
-	queueingStrategy := queueSkip
-	for _, e := range events {
-		logger.V(5).Info("Checking event for in-flight pod", "pod", klog.KObj(pInfo.Pod), "event", e.event.Label())
-
-		switch p.isPodWorthRequeuing(logger, pInfo, e.event, e.oldObj, e.newObj) {
-		case queueSkip:
-			continue
-		case queueImmediately:
-			// queueImmediately is the highest priority.
-			// No need to go through the rest of the events.
-			return queueImmediately
-		case queueAfterBackoff:
-			// Replace schedulingHint with queueAfterBackoff.
-			queueingStrategy = queueAfterBackoff
-			if pInfo.PendingPlugins.Len() == 0 {
-				// We can return immediately because no Pending plugins (the only ones that can yield queueImmediately) are registered for this Pod,
-				// and queueAfterBackoff is the second highest priority.
-				return queueAfterBackoff
-			}
-		}
-	}
-	return queueingStrategy
-}
-
-// addUnschedulableWithoutQueueingHint inserts a pod that cannot be scheduled into
-// the queue, unless it is already in the queue. Normally, PriorityQueue puts
-// unschedulable pods in `unschedulablePods`. But if there has been a recent move
-// request, then the pod is put in `backoffQ`.
-// TODO: This function is called only when p.isSchedulingQueueHintEnabled is false,
-// and this will be removed after SchedulingQueueHint goes to stable and the feature gate is removed.
-func (p *PriorityQueue) addUnschedulableWithoutQueueingHint(logger klog.Logger, pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error {
-	pod := pInfo.Pod
-	// Refresh the timestamp since the pod is re-added.
-	pInfo.Timestamp = p.clock.Now()
-
-	// When queueing hints are enabled, UnschedulablePlugins and PendingPlugins are used differently.
-	// But, we use all of them as UnschedulablePlugins when queueing hints aren't enabled so that we don't break the old behaviour.
-	rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins)
-
-	// If a move request has been received, move it to the BackoffQ, otherwise move
-	// it to unschedulablePods.
-	for plugin := range rejectorPlugins {
-		metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Inc()
-	}
-	if p.moveRequestCycle >= podSchedulingCycle || len(rejectorPlugins) == 0 {
-		// Two cases to move a Pod to the active/backoff queue:
-		// - The Pod is rejected by some plugins, but a move request is received after this Pod's scheduling cycle is started.
-		//   In this case, the received event may make the Pod schedulable and we should retry scheduling it.
-		// - No unschedulable plugins are associated with this Pod,
-		//   meaning something unusual (a temporary failure on kube-apiserver, etc.) happened and this Pod gets moved back to the queue.
-		//   In this case, we should retry scheduling it because this Pod may not be retried until the next flush.
-		if added := p.moveToBackoffQ(logger, pInfo, framework.ScheduleAttemptFailure); added {
-			if p.isPopFromBackoffQEnabled {
-				p.activeQ.broadcast()
-			}
-		}
-	} else {
-		p.unschedulablePods.addOrUpdate(pInfo, framework.ScheduleAttemptFailure)
-		logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", framework.ScheduleAttemptFailure, "queue", unschedulablePods)
-	}
-
-	return nil
-}
-
-// AddUnschedulableIfNotPresent inserts a pod that cannot be scheduled into
-// the queue, unless it is already in the queue. Normally, PriorityQueue puts
-// unschedulable pods in `unschedulablePods`. But if there has been a recent move
-// request, then the pod is put in `backoffQ`.
-func (p *PriorityQueue) AddUnschedulableIfNotPresent(logger klog.Logger, pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	// In any case, this Pod will be moved back to the queue and we should call Done.
-	defer p.Done(pInfo.Pod.UID)
-
-	pod := pInfo.Pod
-	if p.unschedulablePods.get(pod) != nil {
-		return fmt.Errorf("Pod %v is already present in unschedulable queue", klog.KObj(pod))
-	}
-
-	if p.activeQ.has(pInfo) {
-		return fmt.Errorf("Pod %v is already present in the active queue", klog.KObj(pod))
-	}
-	if p.backoffQ.has(pInfo) {
-		return fmt.Errorf("Pod %v is already present in the backoff queue", klog.KObj(pod))
-	}
-
-	if !p.isSchedulingQueueHintEnabled {
-		// Fall back to the old behavior which doesn't depend on the queueing hint.
-		return p.addUnschedulableWithoutQueueingHint(logger, pInfo, podSchedulingCycle)
-	}
-
-	// Refresh the timestamp since the pod is re-added.
-	pInfo.Timestamp = p.clock.Now()
-
-	// If a move request has been received, move it to the BackoffQ, otherwise move
-	// it to unschedulablePods.
-	rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins)
-	for plugin := range rejectorPlugins {
-		metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Inc()
-	}
-
-	// We check whether this Pod may change its scheduling result by any of the events that happened during scheduling.
-	schedulingHint := p.determineSchedulingHintForInFlightPod(logger, pInfo)
-
-	// Based on that hint, we try to requeue this Pod to activeQ/backoffQ.
-	queue := p.requeuePodViaQueueingHint(logger, pInfo, schedulingHint, framework.ScheduleAttemptFailure)
-	logger.V(3).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", framework.ScheduleAttemptFailure, "queue", queue, "schedulingCycle", podSchedulingCycle, "hint", schedulingHint, "unschedulable plugins", rejectorPlugins)
-	if queue == activeQ || (p.isPopFromBackoffQEnabled && queue == backoffQ) {
-		// When the Pod is moved to activeQ, we need to let p.cond know so that the Pod will be pop()ed out.
-		p.activeQ.broadcast()
-	}
-
-	return nil
-}
-
-// flushBackoffQCompleted moves all pods from backoffQ which have completed backoff into activeQ.
-func (p *PriorityQueue) flushBackoffQCompleted(logger klog.Logger) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	activated := false
-	podsCompletedBackoff := p.backoffQ.popAllBackoffCompleted(logger)
-	for _, pInfo := range podsCompletedBackoff {
-		if added := p.moveToActiveQ(logger, pInfo, framework.BackoffComplete); added {
-			activated = true
-		}
-	}
-	if activated {
-		p.activeQ.broadcast()
-	}
-}
-
-// flushUnschedulablePodsLeftover moves pods which stay in unschedulablePods
-// longer than podMaxInUnschedulablePodsDuration to backoffQ or activeQ.
-func (p *PriorityQueue) flushUnschedulablePodsLeftover(logger klog.Logger) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	var podsToMove []*framework.QueuedPodInfo
-	currentTime := p.clock.Now()
-	for _, pInfo := range p.unschedulablePods.podInfoMap {
-		lastScheduleTime := pInfo.Timestamp
-		if currentTime.Sub(lastScheduleTime) > p.podMaxInUnschedulablePodsDuration {
-			podsToMove = append(podsToMove, pInfo)
-		}
-	}
-
-	if len(podsToMove) > 0 {
-		p.movePodsToActiveOrBackoffQueue(logger, podsToMove, framework.EventUnschedulableTimeout, nil, nil)
-	}
-}
-
-// Pop removes the head of the active queue and returns it. It blocks if the
-// activeQ is empty and waits until a new item is added to the queue. It
-// increments the scheduling cycle when a pod is popped.
-// Note: This method should NOT be locked by the p.lock at any moment,
-// as it would lead to scheduling throughput degradation.
-func (p *PriorityQueue) Pop(logger klog.Logger) (*framework.QueuedPodInfo, error) {
-	return p.activeQ.pop(logger)
-}
-
-// Done must be called for a pod returned by Pop. This allows the queue to
-// keep track of which pods are currently being processed.
-func (p *PriorityQueue) Done(pod types.UID) {
-	if !p.isSchedulingQueueHintEnabled {
-		// Do nothing if schedulingQueueHint is disabled.
-		// In that case, we don't have inFlightPods and inFlightEvents.
-		return
-	}
-	p.activeQ.done(pod)
-}
-
-// InFlightPods lists the pods that were popped by Pop and haven't been marked Done yet.
-func (p *PriorityQueue) InFlightPods() []*v1.Pod {
-	if !p.isSchedulingQueueHintEnabled {
-		// Do nothing if schedulingQueueHint is disabled.
-		// In that case, we don't have inFlightPods and inFlightEvents.
-		return nil
-	}
-	return p.activeQ.listInFlightPods()
-}
-
-// isPodUpdated checks if the pod is updated in a way that it may have become
-// schedulable. It drops the status of the pod and compares it with the old version,
-// except for pod.status.resourceClaimStatuses: changing that may have an
-// effect on scheduling.
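
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// The core of flushUnschedulablePodsLeftover above, as a standalone helper:
// pods that have sat in the unschedulable pool longer than a max duration
// are collected for a move. Plain maps stand in for the real queue types.
package main

import (
	"fmt"
	"time"
)

// flushLeftover returns the names of pods whose last-touched timestamp is
// older than maxAge relative to now.
func flushLeftover(timestamps map[string]time.Time, maxAge time.Duration, now time.Time) []string {
	var toMove []string
	for name, ts := range timestamps {
		if now.Sub(ts) > maxAge {
			toMove = append(toMove, name)
		}
	}
	return toMove
}

func main() {
	now := time.Now()
	pods := map[string]time.Time{
		"fresh": now.Add(-1 * time.Minute),
		"stale": now.Add(-10 * time.Minute),
	}
	fmt.Println(flushLeftover(pods, 5*time.Minute, now)) // [stale]
}
// ---- end of sketch ----
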
-func isPodUpdated(oldPod, newPod *v1.Pod) bool { - strip := func(pod *v1.Pod) *v1.Pod { - p := pod.DeepCopy() - p.ResourceVersion = "" - p.Generation = 0 - p.Status = v1.PodStatus{ - ResourceClaimStatuses: pod.Status.ResourceClaimStatuses, - } - p.ManagedFields = nil - p.Finalizers = nil - return p - } - return !reflect.DeepEqual(strip(oldPod), strip(newPod)) -} - -// Update updates a pod in the active or backoff queue if present. Otherwise, it removes -// the item from the unschedulable queue if pod is updated in a way that it may -// become schedulable and adds the updated one to the active queue. -// If pod is not present in any of the queues, it is added to the active queue. -func (p *PriorityQueue) Update(logger klog.Logger, oldPod, newPod *v1.Pod) { - p.lock.Lock() - defer p.lock.Unlock() - - var events []framework.ClusterEvent - if p.isSchedulingQueueHintEnabled { - events = framework.PodSchedulingPropertiesChange(newPod, oldPod) - // The inflight pod will be requeued using the latest version from the informer cache, which matches what the event delivers. - // Record this Pod update because - // this update may make the Pod schedulable in case it gets rejected and comes back to the queue. - // We can clean it up once we change updatePodInSchedulingQueue to call MoveAllToActiveOrBackoffQueue. - // See https://github.com/kubernetes/kubernetes/pull/125578#discussion_r1648338033 for more context. - if exists := p.activeQ.addEventsIfPodInFlight(oldPod, newPod, events); exists { - logger.V(6).Info("The pod doesn't be queued for now because it's being scheduled and will be queued back if necessary", "pod", klog.KObj(newPod)) - return - } - } - - if oldPod != nil { - oldPodInfo := newQueuedPodInfoForLookup(oldPod) - // If the pod is already in the active queue, just update it there. - if pInfo := p.activeQ.update(newPod, oldPodInfo); pInfo != nil { - p.UpdateNominatedPod(logger, oldPod, pInfo.PodInfo) - return - } - - // If the pod is in the backoff queue, update it there. - if pInfo := p.backoffQ.update(newPod, oldPodInfo); pInfo != nil { - p.UpdateNominatedPod(logger, oldPod, pInfo.PodInfo) - return - } - } - - // If the pod is in the unschedulable queue, updating it may make it schedulable. - if pInfo := p.unschedulablePods.get(newPod); pInfo != nil { - _ = pInfo.Update(newPod) - p.UpdateNominatedPod(logger, oldPod, pInfo.PodInfo) - gated := pInfo.Gated - if p.isSchedulingQueueHintEnabled { - // When unscheduled Pods are updated, we check with QueueingHint - // whether the update may make the pods schedulable. - // Plugins have to implement a QueueingHint for Pod/Update event - // if the rejection from them could be resolved by updating unscheduled Pods itself. - for _, evt := range events { - hint := p.isPodWorthRequeuing(logger, pInfo, evt, oldPod, newPod) - queue := p.requeuePodViaQueueingHint(logger, pInfo, hint, evt.Label()) - if queue != unschedulablePods { - logger.V(5).Info("Pod moved to an internal scheduling queue because the Pod is updated", "pod", klog.KObj(newPod), "event", evt.Label(), "queue", queue) - p.unschedulablePods.delete(pInfo.Pod, gated) - } - if queue == activeQ || (p.isPopFromBackoffQEnabled && queue == backoffQ) { - p.activeQ.broadcast() - break - } - } - return - } - if isPodUpdated(oldPod, newPod) { - // Pod might have completed its backoff time while being in unschedulablePods, - // so we should check isPodBackingoff before moving the pod to backoffQ. 
-			if p.backoffQ.isPodBackingoff(pInfo) {
-				if added := p.moveToBackoffQ(logger, pInfo, framework.EventUnscheduledPodUpdate.Label()); added {
-					p.unschedulablePods.delete(pInfo.Pod, gated)
-					if p.isPopFromBackoffQEnabled {
-						p.activeQ.broadcast()
-					}
-				}
-				return
-			}
-
-			if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodUpdate.Label()); added {
-				p.activeQ.broadcast()
-			}
-			return
-		}
-
-		// Pod update didn't make it schedulable, keep it in the unschedulable queue.
-		p.unschedulablePods.addOrUpdate(pInfo, framework.EventUnscheduledPodUpdate.Label())
-		return
-	}
-	// If the pod is not in any of the queues, we put it in the active queue.
-	pInfo := p.newQueuedPodInfo(newPod)
-	if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodUpdate.Label()); added {
-		p.activeQ.broadcast()
-	}
-}
-
-// Delete deletes the item from whichever internal queue it is in. It assumes the pod is
-// only in one queue.
-func (p *PriorityQueue) Delete(pod *v1.Pod) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	p.DeleteNominatedPodIfExists(pod)
-	pInfo := newQueuedPodInfoForLookup(pod)
-	if err := p.activeQ.delete(pInfo); err == nil {
-		return
-	}
-	if deleted := p.backoffQ.delete(pInfo); deleted {
-		return
-	}
-	if pInfo = p.unschedulablePods.get(pod); pInfo != nil {
-		p.unschedulablePods.delete(pod, pInfo.Gated)
-	}
-}
-
-// AssignedPodAdded is called when a bound pod is added. Creation of this pod
-// may make pending pods with matching affinity terms schedulable.
-func (p *PriorityQueue) AssignedPodAdded(logger klog.Logger, pod *v1.Pod) {
-	p.lock.Lock()
-
-	// Pre-filter the Pods to move with getUnschedulablePodsWithCrossTopologyTerm
-	// because Pod-related events shouldn't make Pods that were rejected by a single-node scheduling requirement schedulable.
-	p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithCrossTopologyTerm(logger, pod), framework.EventAssignedPodAdd, nil, pod)
-	p.lock.Unlock()
-}
-
-// AssignedPodUpdated is called when a bound pod is updated. Change of labels
-// may make pending pods with matching affinity terms schedulable.
-func (p *PriorityQueue) AssignedPodUpdated(logger klog.Logger, oldPod, newPod *v1.Pod, event framework.ClusterEvent) {
-	p.lock.Lock()
-	if (framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.UpdatePodScaleDown}.Match(event)) {
-		// In this case, we don't want to pre-filter Pods by getUnschedulablePodsWithCrossTopologyTerm
-		// because Pod-related events may make Pods that were rejected by NodeResourceFit schedulable.
-		p.moveAllToActiveOrBackoffQueue(logger, event, oldPod, newPod, nil)
-	} else {
-		// Pre-filter the Pods to move with getUnschedulablePodsWithCrossTopologyTerm
-		// because Pod-related events only make Pods rejected by a cross-topology term schedulable.
-		p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithCrossTopologyTerm(logger, newPod), event, oldPod, newPod)
-	}
-	p.lock.Unlock()
-}
-
-// NOTE: this function assumes a lock has been acquired in the caller.
-// moveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ.
-// This function adds all pods and then signals the condition variable to ensure that
-// if Pop() is waiting for an item, it receives the signal after all the pods are in the
-// queue and the head is the highest priority pod.
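
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// The pre-filtering idea behind AssignedPodAdded/AssignedPodUpdated above:
// rather than re-evaluating every unschedulable pod, only pods whose
// rejection could plausibly be lifted by the event are selected. The types
// and the "PodTopologySpread" rule are simplified stand-ins.
package main

import "fmt"

type queuedPod struct {
	name      string
	namespace string
	rejectors map[string]bool // plugins that rejected the pod
}

// affectedByAssignedPod mimics getUnschedulablePodsWithCrossTopologyTerm:
// keep pods rejected by a topology-style plugin in the event's namespace.
func affectedByAssignedPod(pods []queuedPod, eventNamespace string) []string {
	var out []string
	for _, p := range pods {
		if p.rejectors["PodTopologySpread"] && p.namespace == eventNamespace {
			out = append(out, p.name)
		}
	}
	return out
}

func main() {
	pods := []queuedPod{
		{name: "a", namespace: "ns1", rejectors: map[string]bool{"PodTopologySpread": true}},
		{name: "b", namespace: "ns2", rejectors: map[string]bool{"NodeResourcesFit": true}},
	}
	fmt.Println(affectedByAssignedPod(pods, "ns1")) // [a]
}
// ---- end of sketch ----
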
-func (p *PriorityQueue) moveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) {
-	if !p.isEventOfInterest(logger, event) {
-		// No plugin is interested in this event.
-		// Return early before iterating over all pods in unschedulablePods for preCheck.
-		return
-	}
-
-	unschedulablePods := make([]*framework.QueuedPodInfo, 0, len(p.unschedulablePods.podInfoMap))
-	for _, pInfo := range p.unschedulablePods.podInfoMap {
-		if preCheck == nil || preCheck(pInfo.Pod) {
-			unschedulablePods = append(unschedulablePods, pInfo)
-		}
-	}
-	p.movePodsToActiveOrBackoffQueue(logger, unschedulablePods, event, oldObj, newObj)
-}
-
-// MoveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ.
-// This function adds all pods and then signals the condition variable to ensure that
-// if Pop() is waiting for an item, it receives the signal after all the pods are in the
-// queue and the head is the highest priority pod.
-func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	p.moveAllToActiveOrBackoffQueue(logger, event, oldObj, newObj, preCheck)
-}
-
-// requeuePodViaQueueingHint tries to requeue the Pod to activeQ, backoffQ or the unschedulable pod pool based on schedulingHint.
-// It returns the name of the queue the Pod goes to.
-//
-// NOTE: this function assumes the lock has been acquired in the caller.
-func (p *PriorityQueue) requeuePodViaQueueingHint(logger klog.Logger, pInfo *framework.QueuedPodInfo, strategy queueingStrategy, event string) string {
-	if strategy == queueSkip {
-		p.unschedulablePods.addOrUpdate(pInfo, event)
-		return unschedulablePods
-	}
-
-	// Pod might have completed its backoff time while being in unschedulablePods,
-	// so we should check isPodBackingoff before moving the pod to backoffQ.
-	if strategy == queueAfterBackoff && p.backoffQ.isPodBackingoff(pInfo) {
-		if added := p.moveToBackoffQ(logger, pInfo, event); added {
-			return backoffQ
-		}
-		return unschedulablePods
-	}
-
-	// We reach here if schedulingHint is QueueImmediately, or schedulingHint is Queue but the pod is not backing off.
-	if added := p.moveToActiveQ(logger, pInfo, event); added {
-		return activeQ
-	}
-	// Pod is gated. We don't have to push it back to the unschedulable queue, because moveToActiveQ should already have done that.
-	return unschedulablePods
-}
-
-// NOTE: this function assumes the lock has been acquired in the caller.
-func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(logger klog.Logger, podInfoList []*framework.QueuedPodInfo, event framework.ClusterEvent, oldObj, newObj interface{}) {
-	if !p.isEventOfInterest(logger, event) {
-		// No plugin is interested in this event.
-		return
-	}
-
-	activated := false
-	for _, pInfo := range podInfoList {
-		// When handling events takes time, the scheduling throughput gets impacted negatively
-		// because of a shared lock within PriorityQueue, which Pop() also requires.
-		//
-		// Scheduling-gated Pods never become schedulable from any event,
-		// except when the Pods themselves are updated, which isn't handled by movePodsToActiveOrBackoffQueue.
-		// So, we can skip them early here so that they don't go through isPodWorthRequeuing,
-		// which isn't fast enough to keep a sufficient scheduling throughput
-		// when the number of scheduling-gated Pods in unschedulablePods is large.
-		// https://github.com/kubernetes/kubernetes/issues/124384
-		// This is a hotfix for this issue, which might be changed
-		// once we have a better general solution for the shared lock issue.
-		//
-		// Note that we cannot skip all pInfo.Gated Pods here
-		// because PreEnqueue plugins apart from the scheduling gate plugin may change the gating status
-		// with these events.
-		if pInfo.Gated && pInfo.UnschedulablePlugins.Has(names.SchedulingGates) {
-			continue
-		}
-
-		schedulingHint := p.isPodWorthRequeuing(logger, pInfo, event, oldObj, newObj)
-		if schedulingHint == queueSkip {
-			// QueueingHintFn determined that this Pod isn't worth putting into activeQ or backoffQ for this event.
-			logger.V(5).Info("Event is not making pod schedulable", "pod", klog.KObj(pInfo.Pod), "event", event.Label())
-			continue
-		}
-
-		p.unschedulablePods.delete(pInfo.Pod, pInfo.Gated)
-		queue := p.requeuePodViaQueueingHint(logger, pInfo, schedulingHint, event.Label())
-		logger.V(4).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event.Label(), "queue", queue, "hint", schedulingHint)
-		if queue == activeQ || (p.isPopFromBackoffQEnabled && queue == backoffQ) {
-			activated = true
-		}
-	}
-
-	p.moveRequestCycle = p.activeQ.schedulingCycle()
-
-	if p.isSchedulingQueueHintEnabled {
-		// AddUnschedulableIfNotPresent might get called for in-flight Pods later, and in
-		// AddUnschedulableIfNotPresent we need to know whether events were
-		// observed while scheduling them.
-		if added := p.activeQ.addEventIfAnyInFlight(oldObj, newObj, event); added {
-			logger.V(5).Info("Event received while pods are in flight", "event", event.Label())
-		}
-	}
-
-	if activated {
-		p.activeQ.broadcast()
-	}
-}
-
-// getUnschedulablePodsWithCrossTopologyTerm returns unschedulable pods for which either of the following conditions is met:
-// - they have an affinity term that matches "pod".
-// - they were rejected by the PodTopologySpread plugin.
-// NOTE: this function assumes the lock has been acquired in the caller.
-func (p *PriorityQueue) getUnschedulablePodsWithCrossTopologyTerm(logger klog.Logger, pod *v1.Pod) []*framework.QueuedPodInfo {
-	nsLabels := interpodaffinity.GetNamespaceLabelsSnapshot(logger, pod.Namespace, p.nsLister)
-
-	var podsToMove []*framework.QueuedPodInfo
-	for _, pInfo := range p.unschedulablePods.podInfoMap {
-		if pInfo.UnschedulablePlugins.Has(podtopologyspread.Name) && pod.Namespace == pInfo.Pod.Namespace {
-			// This Pod may become schedulable due to this Pod event.
-			podsToMove = append(podsToMove, pInfo)
-			continue
-		}
-
-		for _, term := range pInfo.RequiredAffinityTerms {
-			if term.Matches(pod, nsLabels) {
-				podsToMove = append(podsToMove, pInfo)
-				break
-			}
-		}
-	}
-
-	return podsToMove
-}
-
-// PodsInActiveQ returns all the Pods in the activeQ.
-func (p *PriorityQueue) PodsInActiveQ() []*v1.Pod {
-	return p.activeQ.list()
-}
-
-// PodsInBackoffQ returns all the Pods in the backoffQ.
-func (p *PriorityQueue) PodsInBackoffQ() []*v1.Pod {
-	return p.backoffQ.list()
-}
-
-// UnschedulablePods returns all the pods in the unschedulable state.
-func (p *PriorityQueue) UnschedulablePods() []*v1.Pod {
-	var result []*v1.Pod
-	for _, pInfo := range p.unschedulablePods.podInfoMap {
-		result = append(result, pInfo.Pod)
-	}
-	return result
-}
-
-var pendingPodsSummary = "activeQ:%v; backoffQ:%v; unschedulablePods:%v"
-
-// GetPod searches for a pod in the activeQ, backoffQ, and unschedulablePods.
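
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// The three-way routing performed by requeuePodViaQueueingHint above, boiled
// down to a pure function: a strategy plus the backoff state picks one of
// the three internal queues. Names are invented for the sketch.
package main

import "fmt"

type strategy int

const (
	skip strategy = iota
	afterBackoff
	immediately
)

func route(s strategy, backingOff bool) string {
	switch {
	case s == skip:
		return "unschedulablePods"
	case s == afterBackoff && backingOff:
		return "backoffQ"
	default: // immediately, or afterBackoff once backoff has already expired
		return "activeQ"
	}
}

func main() {
	fmt.Println(route(skip, false))         // unschedulablePods
	fmt.Println(route(afterBackoff, true))  // backoffQ
	fmt.Println(route(afterBackoff, false)) // activeQ
	fmt.Println(route(immediately, true))   // activeQ
}
// ---- end of sketch ----
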
-func (p *PriorityQueue) GetPod(name, namespace string) (pInfo *framework.QueuedPodInfo, ok bool) { - p.lock.RLock() - defer p.lock.RUnlock() - - pInfoLookup := &framework.QueuedPodInfo{ - PodInfo: &framework.PodInfo{ - Pod: &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - }, - }, - } - if pInfo, ok = p.backoffQ.get(pInfoLookup); ok { - return pInfo, true - } - if pInfo = p.unschedulablePods.get(pInfoLookup.Pod); pInfo != nil { - return pInfo, true - } - - p.activeQ.underRLock(func(unlockedActiveQ unlockedActiveQueueReader) { - pInfo, ok = unlockedActiveQ.get(pInfoLookup) - }) - return -} - -// PendingPods returns all the pending pods in the queue; accompanied by a debugging string -// recording showing the number of pods in each queue respectively. -// This function is used for debugging purposes in the scheduler cache dumper and comparer. -func (p *PriorityQueue) PendingPods() ([]*v1.Pod, string) { - p.lock.RLock() - defer p.lock.RUnlock() - result := p.PodsInActiveQ() - activeQLen := len(result) - backoffQPods := p.PodsInBackoffQ() - backoffQLen := len(backoffQPods) - result = append(result, backoffQPods...) - for _, pInfo := range p.unschedulablePods.podInfoMap { - result = append(result, pInfo.Pod) - } - return result, fmt.Sprintf(pendingPodsSummary, activeQLen, backoffQLen, len(p.unschedulablePods.podInfoMap)) -} - -// Note: this function assumes the caller locks both p.lock.RLock and p.activeQ.getLock().RLock. -func (p *PriorityQueue) nominatedPodToInfo(np podRef, unlockedActiveQ unlockedActiveQueueReader) *framework.PodInfo { - pod := np.toPod() - pInfoLookup := newQueuedPodInfoForLookup(pod) - - queuedPodInfo, exists := unlockedActiveQ.get(pInfoLookup) - if exists { - return queuedPodInfo.PodInfo - } - - queuedPodInfo = p.unschedulablePods.get(pod) - if queuedPodInfo != nil { - return queuedPodInfo.PodInfo - } - - queuedPodInfo, exists = p.backoffQ.get(pInfoLookup) - if exists { - return queuedPodInfo.PodInfo - } - - return &framework.PodInfo{Pod: pod} -} - -// Close closes the priority queue. -func (p *PriorityQueue) Close() { - p.lock.Lock() - defer p.lock.Unlock() - close(p.stop) - p.activeQ.close() - p.activeQ.broadcast() -} - -// NominatedPodsForNode returns a copy of pods that are nominated to run on the given node, -// but they are waiting for other pods to be removed from the node. -// CAUTION: Make sure you don't call this function while taking any queue's lock in any scenario. -func (p *PriorityQueue) NominatedPodsForNode(nodeName string) []*framework.PodInfo { - p.lock.RLock() - defer p.lock.RUnlock() - nominatedPods := p.nominator.nominatedPodsForNode(nodeName) - - pods := make([]*framework.PodInfo, len(nominatedPods)) - p.activeQ.underRLock(func(unlockedActiveQ unlockedActiveQueueReader) { - for i, np := range nominatedPods { - pods[i] = p.nominatedPodToInfo(np, unlockedActiveQ).DeepCopy() - } - }) - return pods -} - -// newQueuedPodInfo builds a QueuedPodInfo object. -func (p *PriorityQueue) newQueuedPodInfo(pod *v1.Pod, plugins ...string) *framework.QueuedPodInfo { - now := p.clock.Now() - // ignore this err since apiserver doesn't properly validate affinity terms - // and we can't fix the validation for backwards compatibility. - podInfo, _ := framework.NewPodInfo(pod) - return &framework.QueuedPodInfo{ - PodInfo: podInfo, - Timestamp: now, - InitialAttemptTimestamp: nil, - UnschedulablePlugins: sets.New(plugins...), - } -} - -// UnschedulablePods holds pods that cannot be scheduled. 
This data structure -// is used to implement unschedulablePods. -type UnschedulablePods struct { - // podInfoMap is a map key by a pod's full-name and the value is a pointer to the QueuedPodInfo. - podInfoMap map[string]*framework.QueuedPodInfo - keyFunc func(*v1.Pod) string - // unschedulableRecorder/gatedRecorder updates the counter when elements of an unschedulablePodsMap - // get added or removed, and it does nothing if it's nil. - unschedulableRecorder, gatedRecorder metrics.MetricRecorder -} - -// addOrUpdate adds a pod to the unschedulable podInfoMap. -// The event should show which event triggered the addition and is used for the metric recording. -func (u *UnschedulablePods) addOrUpdate(pInfo *framework.QueuedPodInfo, event string) { - podID := u.keyFunc(pInfo.Pod) - if _, exists := u.podInfoMap[podID]; !exists { - if pInfo.Gated && u.gatedRecorder != nil { - u.gatedRecorder.Inc() - } else if !pInfo.Gated && u.unschedulableRecorder != nil { - u.unschedulableRecorder.Inc() - } - metrics.SchedulerQueueIncomingPods.WithLabelValues("unschedulable", event).Inc() - } - u.podInfoMap[podID] = pInfo -} - -// delete deletes a pod from the unschedulable podInfoMap. -// The `gated` parameter is used to figure out which metric should be decreased. -func (u *UnschedulablePods) delete(pod *v1.Pod, gated bool) { - podID := u.keyFunc(pod) - if _, exists := u.podInfoMap[podID]; exists { - if gated && u.gatedRecorder != nil { - u.gatedRecorder.Dec() - } else if !gated && u.unschedulableRecorder != nil { - u.unschedulableRecorder.Dec() - } - } - delete(u.podInfoMap, podID) -} - -// get returns the QueuedPodInfo if a pod with the same key as the key of the given "pod" -// is found in the map. It returns nil otherwise. -func (u *UnschedulablePods) get(pod *v1.Pod) *framework.QueuedPodInfo { - podKey := u.keyFunc(pod) - if pInfo, exists := u.podInfoMap[podKey]; exists { - return pInfo - } - return nil -} - -// clear removes all the entries from the unschedulable podInfoMap. -func (u *UnschedulablePods) clear() { - u.podInfoMap = make(map[string]*framework.QueuedPodInfo) - if u.unschedulableRecorder != nil { - u.unschedulableRecorder.Clear() - } - if u.gatedRecorder != nil { - u.gatedRecorder.Clear() - } -} - -// newUnschedulablePods initializes a new object of UnschedulablePods. -func newUnschedulablePods(unschedulableRecorder, gatedRecorder metrics.MetricRecorder) *UnschedulablePods { - return &UnschedulablePods{ - podInfoMap: make(map[string]*framework.QueuedPodInfo), - keyFunc: util.GetPodFullName, - unschedulableRecorder: unschedulableRecorder, - gatedRecorder: gatedRecorder, - } -} - -func podInfoKeyFunc(pInfo *framework.QueuedPodInfo) string { - return cache.NewObjectName(pInfo.Pod.Namespace, pInfo.Pod.Name).String() -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/testing.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/testing.go deleted file mode 100644 index f619bac8495..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/backend/queue/testing.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package queue - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/metrics" -) - -// NewTestQueue creates a priority queue with an empty informer factory. -func NewTestQueue(ctx context.Context, lessFn framework.LessFunc, opts ...Option) *PriorityQueue { - return NewTestQueueWithObjects(ctx, lessFn, nil, opts...) -} - -// NewTestQueueWithObjects creates a priority queue with an informer factory -// populated with the provided objects. -func NewTestQueueWithObjects( - ctx context.Context, - lessFn framework.LessFunc, - objs []runtime.Object, - opts ...Option, -) *PriorityQueue { - informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(objs...), 0) - - // Because some major functions (e.g., Pop) requires the metric recorder to be set, - // we always set a metric recorder here. - recorder := metrics.NewMetricsAsyncRecorder(10, 20*time.Microsecond, ctx.Done()) - // We set it before the options that users provide, so that users can override it. - opts = append([]Option{WithMetricsRecorder(*recorder)}, opts...) - return NewTestQueueWithInformerFactory(ctx, lessFn, informerFactory, opts...) -} - -func NewTestQueueWithInformerFactory( - ctx context.Context, - lessFn framework.LessFunc, - informerFactory informers.SharedInformerFactory, - opts ...Option, -) *PriorityQueue { - pq := NewPriorityQueue(lessFn, informerFactory, opts...) - informerFactory.Start(ctx.Done()) - informerFactory.WaitForCacheSync(ctx.Done()) - return pq -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go deleted file mode 100644 index 09c4200ff3a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go +++ /dev/null @@ -1,690 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package scheduler - -import ( - "context" - "fmt" - "strings" - "time" - - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - corev1helpers "k8s.io/component-helpers/scheduling/corev1" - corev1nodeaffinity "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" - resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/backend/queue" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources" - "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/kubernetes/pkg/scheduler/profile" - "k8s.io/kubernetes/pkg/scheduler/util/assumecache" -) - -func (sched *Scheduler) addNodeToCache(obj interface{}) { - evt := framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add} - start := time.Now() - defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start)) - logger := sched.logger - node, ok := obj.(*v1.Node) - if !ok { - logger.Error(nil, "Cannot convert to *v1.Node", "obj", obj) - return - } - - logger.V(3).Info("Add event for node", "node", klog.KObj(node)) - nodeInfo := sched.Cache.AddNode(logger, node) - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil, node, preCheckForNode(nodeInfo)) -} - -func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) { - start := time.Now() - logger := sched.logger - oldNode, ok := oldObj.(*v1.Node) - if !ok { - logger.Error(nil, "Cannot convert oldObj to *v1.Node", "oldObj", oldObj) - return - } - newNode, ok := newObj.(*v1.Node) - if !ok { - logger.Error(nil, "Cannot convert newObj to *v1.Node", "newObj", newObj) - return - } - - logger.V(4).Info("Update event for node", "node", klog.KObj(newNode)) - nodeInfo := sched.Cache.UpdateNode(logger, oldNode, newNode) - events := framework.NodeSchedulingPropertiesChange(newNode, oldNode) - - // Save the time it takes to update the node in the cache. - updatingDuration := metrics.SinceInSeconds(start) - - // Only requeue unschedulable pods if the node became more schedulable. 
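
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// The latency-accounting pattern used by updateNodeInCache above: the cache
// update is timed once, and each per-event move then adds its own cost, so
// every recorded sample covers "update + move" rather than the move alone.
// The event labels and sleeps are made up for the example.
package main

import (
	"fmt"
	"time"
)

func handleUpdate(events []string, update, move func()) {
	start := time.Now()
	update()
	updating := time.Since(start) // shared cost, charged to every event

	for _, evt := range events {
		startMove := time.Now()
		move()
		fmt.Printf("%s handled in %v\n", evt, updating+time.Since(startMove))
	}
}

func main() {
	work := func() { time.Sleep(time.Millisecond) }
	handleUpdate([]string{"Node/UpdateNodeLabel", "Node/UpdateNodeTaint"}, work, work)
}
// ---- end of sketch ----
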
- for _, evt := range events { - startMoving := time.Now() - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, oldNode, newNode, preCheckForNode(nodeInfo)) - movingDuration := metrics.SinceInSeconds(startMoving) - - metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(updatingDuration + movingDuration) - } -} - -func (sched *Scheduler) deleteNodeFromCache(obj interface{}) { - evt := framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Delete} - start := time.Now() - defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start)) - - logger := sched.logger - var node *v1.Node - switch t := obj.(type) { - case *v1.Node: - node = t - case cache.DeletedFinalStateUnknown: - var ok bool - node, ok = t.Obj.(*v1.Node) - if !ok { - logger.Error(nil, "Cannot convert to *v1.Node", "obj", t.Obj) - return - } - default: - logger.Error(nil, "Cannot convert to *v1.Node", "obj", t) - return - } - - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, node, nil, nil) - - logger.V(3).Info("Delete event for node", "node", klog.KObj(node)) - if err := sched.Cache.RemoveNode(logger, node); err != nil { - logger.Error(err, "Scheduler cache RemoveNode failed") - } -} - -func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) { - start := time.Now() - defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodAdd.Label()).Observe(metrics.SinceInSeconds(start)) - - logger := sched.logger - pod := obj.(*v1.Pod) - logger.V(3).Info("Add event for unscheduled pod", "pod", klog.KObj(pod)) - sched.SchedulingQueue.Add(logger, pod) -} - -func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) { - start := time.Now() - logger := sched.logger - oldPod, newPod := oldObj.(*v1.Pod), newObj.(*v1.Pod) - // Bypass update event that carries identical objects; otherwise, a duplicated - // Pod may go through scheduling and cause unexpected behavior (see #96071). - if oldPod.ResourceVersion == newPod.ResourceVersion { - return - } - - defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodUpdate.Label()).Observe(metrics.SinceInSeconds(start)) - for _, evt := range framework.PodSchedulingPropertiesChange(newPod, oldPod) { - if evt.Label() != framework.EventUnscheduledPodUpdate.Label() { - defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start)) - } - } - - isAssumed, err := sched.Cache.IsAssumedPod(newPod) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to check whether pod %s/%s is assumed: %v", newPod.Namespace, newPod.Name, err)) - } - if isAssumed { - return - } - - logger.V(4).Info("Update event for unscheduled pod", "pod", klog.KObj(newPod)) - sched.SchedulingQueue.Update(logger, oldPod, newPod) - if hasNominatedNodeNameChanged(oldPod, newPod) { - // Nominated node changed in pod, so we need to treat it as if the pod was deleted from the old nominated node, - // because the scheduler treats such a pod as if it was already assigned when scheduling lower or equal priority pods. - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, oldPod, nil, getLEPriorityPreCheck(corev1helpers.PodPriority(oldPod))) - } -} - -// hasNominatedNodeNameChanged returns true when nominated node name has existed but changed. 
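
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// The ResourceVersion guard in updatePodInSchedulingQueue above, isolated:
// informer resyncs can redeliver identical objects, and comparing the
// ResourceVersion is the cheap way to drop such echoes before doing any
// queue work. The local type is a stand-in for the real object metadata.
package main

import "fmt"

type object struct{ resourceVersion string }

func isRealUpdate(oldObj, newObj object) bool {
	return oldObj.resourceVersion != newObj.resourceVersion
}

func main() {
	fmt.Println(isRealUpdate(object{"42"}, object{"42"})) // false: resync echo, skip
	fmt.Println(isRealUpdate(object{"42"}, object{"43"})) // true: process the update
}
// ---- end of sketch ----
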
-func hasNominatedNodeNameChanged(oldPod, newPod *v1.Pod) bool {
-	return len(oldPod.Status.NominatedNodeName) > 0 && oldPod.Status.NominatedNodeName != newPod.Status.NominatedNodeName
-}
-
-func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) {
-	start := time.Now()
-	defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodDelete.Label()).Observe(metrics.SinceInSeconds(start))
-
-	logger := sched.logger
-	var pod *v1.Pod
-	switch t := obj.(type) {
-	case *v1.Pod:
-		pod = obj.(*v1.Pod)
-	case cache.DeletedFinalStateUnknown:
-		var ok bool
-		pod, ok = t.Obj.(*v1.Pod)
-		if !ok {
-			utilruntime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, sched))
-			return
-		}
-	default:
-		utilruntime.HandleError(fmt.Errorf("unable to handle object in %T: %T", sched, obj))
-		return
-	}
-
-	logger.V(3).Info("Delete event for unscheduled pod", "pod", klog.KObj(pod))
-	sched.SchedulingQueue.Delete(pod)
-	fwk, err := sched.frameworkForPod(pod)
-	if err != nil {
-		// This shouldn't happen, because we only accept for scheduling the pods
-		// which specify a scheduler name that matches one of the profiles.
-		logger.Error(err, "Unable to get profile", "pod", klog.KObj(pod))
-		return
-	}
-	// If a waiting pod is rejected, it indicates it was previously assumed and we're
-	// removing it from the scheduler cache. In this case, signal an AssignedPodDelete
-	// event to immediately retry some unscheduled Pods.
-	// Similarly, when a pod that had a nominated node is deleted, it can unblock scheduling of other pods,
-	// because the lower or equal priority pods treat such a pod as if it was assigned.
-	if fwk.RejectWaitingPod(pod.UID) {
-		sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, nil)
-	} else if pod.Status.NominatedNodeName != "" {
-		// Note that a nominated pod can fall into the `RejectWaitingPod` case as well,
-		// but in that case the `MoveAllToActiveOrBackoffQueue` already covered lower priority pods.
-		sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, getLEPriorityPreCheck(corev1helpers.PodPriority(pod)))
-	}
-}
-
-// getLEPriorityPreCheck is a PreEnqueueCheck function that selects only lower or equal priority pods.
-func getLEPriorityPreCheck(priority int32) queue.PreEnqueueCheck {
-	return func(pod *v1.Pod) bool {
-		return corev1helpers.PodPriority(pod) <= priority
-	}
-}
-
-func (sched *Scheduler) addPodToCache(obj interface{}) {
-	start := time.Now()
-	defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodAdd.Label()).Observe(metrics.SinceInSeconds(start))
-
-	logger := sched.logger
-	pod, ok := obj.(*v1.Pod)
-	if !ok {
-		logger.Error(nil, "Cannot convert to *v1.Pod", "obj", obj)
-		return
-	}
-
-	logger.V(3).Info("Add event for scheduled pod", "pod", klog.KObj(pod))
-	if err := sched.Cache.AddPod(logger, pod); err != nil {
-		logger.Error(err, "Scheduler cache AddPod failed", "pod", klog.KObj(pod))
-	}
-
-	// SchedulingQueue.AssignedPodAdded has a problem:
-	// It internally pre-filters Pods to move to activeQ,
-	// while taking only in-tree plugins into consideration.
-	// Consequently, if custom plugins that subscribe to Pod/Add events reject Pods,
-	// those Pods will never be requeued to activeQ by assigned-Pod-related events,
-	// and they may be stuck in unschedulableQ.
-	//
-	// Here we use MoveAllToActiveOrBackoffQueue only when QueueingHint is enabled.
-	// (We cannot switch to MoveAllToActiveOrBackoffQueue right away because of throughput concerns.)
-	if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
-		sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodAdd, nil, pod, nil)
-	} else {
-		sched.SchedulingQueue.AssignedPodAdded(logger, pod)
-	}
-}
-
-func (sched *Scheduler) updatePodInCache(oldObj, newObj interface{}) {
-	start := time.Now()
-	defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodUpdate.Label()).Observe(metrics.SinceInSeconds(start))
-
-	logger := sched.logger
-	oldPod, ok := oldObj.(*v1.Pod)
-	if !ok {
-		logger.Error(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
-		return
-	}
-	newPod, ok := newObj.(*v1.Pod)
-	if !ok {
-		logger.Error(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
-		return
-	}
-
-	logger.V(4).Info("Update event for scheduled pod", "pod", klog.KObj(oldPod))
-	if err := sched.Cache.UpdatePod(logger, oldPod, newPod); err != nil {
-		logger.Error(err, "Scheduler cache UpdatePod failed", "pod", klog.KObj(oldPod))
-	}
-
-	events := framework.PodSchedulingPropertiesChange(newPod, oldPod)
-
-	// Save the time it takes to update the pod in the cache.
-	updatingDuration := metrics.SinceInSeconds(start)
-
-	for _, evt := range events {
-		startMoving := time.Now()
-		// SchedulingQueue.AssignedPodUpdated has a problem:
-		// It internally pre-filters Pods to move to activeQ,
-		// while taking only in-tree plugins into consideration.
-		// Consequently, if custom plugins that subscribe to Pod/Update events reject Pods,
-		// those Pods will never be requeued to activeQ by assigned-Pod-related events,
-		// and they may be stuck in unschedulableQ.
-		//
-		// Here we use MoveAllToActiveOrBackoffQueue only when QueueingHint is enabled.
-		// (We cannot switch to MoveAllToActiveOrBackoffQueue right away because of throughput concerns.)
-		if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
-			sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, oldPod, newPod, nil)
-		} else {
-			sched.SchedulingQueue.AssignedPodUpdated(logger, oldPod, newPod, evt)
-		}
-		movingDuration := metrics.SinceInSeconds(startMoving)
-		metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(updatingDuration + movingDuration)
-	}
-}
-
-func (sched *Scheduler) deletePodFromCache(obj interface{}) {
-	start := time.Now()
-	defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodDelete.Label()).Observe(metrics.SinceInSeconds(start))
-
-	logger := sched.logger
-	var pod *v1.Pod
-	switch t := obj.(type) {
-	case *v1.Pod:
-		pod = t
-	case cache.DeletedFinalStateUnknown:
-		var ok bool
-		pod, ok = t.Obj.(*v1.Pod)
-		if !ok {
-			logger.Error(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
-			return
-		}
-	default:
-		logger.Error(nil, "Cannot convert to *v1.Pod", "obj", t)
-		return
-	}
-
-	logger.V(3).Info("Delete event for scheduled pod", "pod", klog.KObj(pod))
-	if err := sched.Cache.RemovePod(logger, pod); err != nil {
-		logger.Error(err, "Scheduler cache RemovePod failed", "pod", klog.KObj(pod))
-	}
-
-	sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, nil)
-}
-
-// assignedPod selects pods that are assigned (scheduled and running).
-func assignedPod(pod *v1.Pod) bool {
-	return len(pod.Spec.NodeName) != 0
-}
-
-// responsibleForPod returns true if the pod has asked to be scheduled by the given scheduler.
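
// ---- Editor's sketch (illustration only, not part of the vendored file) ----
// The tombstone handling repeated by the delete handlers above: when the
// watch missed the final state, the object arrives wrapped in a
// DeletedFinalStateUnknown, so handlers unwrap it before type-asserting.
// The wrapper and pod types are local stand-ins for the client-go ones.
package main

import "fmt"

type deletedFinalStateUnknown struct{ Obj interface{} } // stand-in for cache.DeletedFinalStateUnknown

type podObj struct{ name string }

// extractPod accepts either a pod or a tombstone carrying a pod, mirroring
// the type switches in deletePodFromCache / deletePodFromSchedulingQueue.
func extractPod(obj interface{}) (podObj, bool) {
	switch t := obj.(type) {
	case podObj:
		return t, true
	case deletedFinalStateUnknown:
		p, ok := t.Obj.(podObj)
		return p, ok
	default:
		return podObj{}, false
	}
}

func main() {
	fmt.Println(extractPod(podObj{name: "direct"}))
	fmt.Println(extractPod(deletedFinalStateUnknown{Obj: podObj{name: "tombstoned"}}))
	fmt.Println(extractPod(42)) // unexpected type -> ok == false
}
// ---- end of sketch ----
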
-func responsibleForPod(pod *v1.Pod, profiles profile.Map) bool { - return profiles.HandlesSchedulerName(pod.Spec.SchedulerName) -} - -const ( - // syncedPollPeriod controls how often you look at the status of your sync funcs - syncedPollPeriod = 100 * time.Millisecond -) - -// WaitForHandlersSync waits for EventHandlers to sync. -// It returns true if it was successful, false if the controller should shut down -func (sched *Scheduler) WaitForHandlersSync(ctx context.Context) error { - return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) { - for _, handler := range sched.registeredHandlers { - if !handler.HasSynced() { - return false, nil - } - } - return true, nil - }) -} - -// addAllEventHandlers is a helper function used in tests and in Scheduler -// to add event handlers for various informers. -func addAllEventHandlers( - sched *Scheduler, - informerFactory informers.SharedInformerFactory, - dynInformerFactory dynamicinformer.DynamicSharedInformerFactory, - resourceClaimCache *assumecache.AssumeCache, - resourceSliceTracker *resourceslicetracker.Tracker, - gvkMap map[framework.EventResource]framework.ActionType, -) error { - var ( - handlerRegistration cache.ResourceEventHandlerRegistration - err error - handlers []cache.ResourceEventHandlerRegistration - ) - // scheduled pod cache - if handlerRegistration, err = informerFactory.Core().V1().Pods().Informer().AddEventHandler( - cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - switch t := obj.(type) { - case *v1.Pod: - return assignedPod(t) - case cache.DeletedFinalStateUnknown: - if _, ok := t.Obj.(*v1.Pod); ok { - // The carried object may be stale, so we don't use it to check if - // it's assigned or not. Attempting to cleanup anyways. - return true - } - utilruntime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, sched)) - return false - default: - utilruntime.HandleError(fmt.Errorf("unable to handle object in %T: %T", sched, obj)) - return false - } - }, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: sched.addPodToCache, - UpdateFunc: sched.updatePodInCache, - DeleteFunc: sched.deletePodFromCache, - }, - }, - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - - // unscheduled pod queue - if handlerRegistration, err = informerFactory.Core().V1().Pods().Informer().AddEventHandler( - cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - switch t := obj.(type) { - case *v1.Pod: - return !assignedPod(t) && responsibleForPod(t, sched.Profiles) - case cache.DeletedFinalStateUnknown: - if pod, ok := t.Obj.(*v1.Pod); ok { - // The carried object may be stale, so we don't use it to check if - // it's assigned or not. 
- return responsibleForPod(pod, sched.Profiles) - } - utilruntime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, sched)) - return false - default: - utilruntime.HandleError(fmt.Errorf("unable to handle object in %T: %T", sched, obj)) - return false - } - }, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: sched.addPodToSchedulingQueue, - UpdateFunc: sched.updatePodInSchedulingQueue, - DeleteFunc: sched.deletePodFromSchedulingQueue, - }, - }, - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - - if handlerRegistration, err = informerFactory.Core().V1().Nodes().Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: sched.addNodeToCache, - UpdateFunc: sched.updateNodeInCache, - DeleteFunc: sched.deleteNodeFromCache, - }, - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - - logger := sched.logger - buildEvtResHandler := func(at framework.ActionType, resource framework.EventResource) cache.ResourceEventHandlerFuncs { - funcs := cache.ResourceEventHandlerFuncs{} - if at&framework.Add != 0 { - evt := framework.ClusterEvent{Resource: resource, ActionType: framework.Add} - funcs.AddFunc = func(obj interface{}) { - start := time.Now() - defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start)) - if resource == framework.StorageClass && !utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) { - sc, ok := obj.(*storagev1.StorageClass) - if !ok { - logger.Error(nil, "Cannot convert to *storagev1.StorageClass", "obj", obj) - return - } - - // CheckVolumeBindingPred fails if pod has unbound immediate PVCs. If these - // PVCs have specified StorageClass name, creating StorageClass objects - // with late binding will cause predicates to pass, so we need to move pods - // to active queue. - // We don't need to invalidate cached results because results will not be - // cached for pod that has unbound immediate PVCs. - if sc.VolumeBindingMode == nil || *sc.VolumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer { - return - } - } - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil, obj, nil) - } - } - if at&framework.Update != 0 { - evt := framework.ClusterEvent{Resource: resource, ActionType: framework.Update} - funcs.UpdateFunc = func(old, obj interface{}) { - start := time.Now() - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, old, obj, nil) - metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start)) - } - } - if at&framework.Delete != 0 { - evt := framework.ClusterEvent{Resource: resource, ActionType: framework.Delete} - funcs.DeleteFunc = func(obj interface{}) { - start := time.Now() - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, obj, nil, nil) - metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start)) - } - } - return funcs - } - - for gvk, at := range gvkMap { - switch gvk { - case framework.Node, framework.Pod: - // Do nothing. 
- case framework.CSINode: - if handlerRegistration, err = informerFactory.Storage().V1().CSINodes().Informer().AddEventHandler( - buildEvtResHandler(at, framework.CSINode), - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - case framework.CSIDriver: - if handlerRegistration, err = informerFactory.Storage().V1().CSIDrivers().Informer().AddEventHandler( - buildEvtResHandler(at, framework.CSIDriver), - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - case framework.CSIStorageCapacity: - if handlerRegistration, err = informerFactory.Storage().V1().CSIStorageCapacities().Informer().AddEventHandler( - buildEvtResHandler(at, framework.CSIStorageCapacity), - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - case framework.PersistentVolume: - // MaxPDVolumeCountPredicate: since it relies on the counts of PV. - // - // PvAdd: Pods created when there are no PVs available will be stuck in - // unschedulable queue. But unbound PVs created for static provisioning and - // delay binding storage class are skipped in PV controller dynamic - // provisioning and binding process, will not trigger events to schedule pod - // again. So we need to move pods to active queue on PV add for this - // scenario. - // - // PvUpdate: Scheduler.bindVolumesWorker may fail to update assumed pod volume - // bindings due to conflicts if PVs are updated by PV controller or other - // parties, then scheduler will add pod back to unschedulable queue. We - // need to move pods to active queue on PV update for this scenario. - if handlerRegistration, err = informerFactory.Core().V1().PersistentVolumes().Informer().AddEventHandler( - buildEvtResHandler(at, framework.PersistentVolume), - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - case framework.PersistentVolumeClaim: - // MaxPDVolumeCountPredicate: add/update PVC will affect counts of PV when it is bound. 
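
For reference, the registration pattern that addAllEventHandlers repeats for each resource reduces to plain client-go usage. A minimal sketch (assumes a ready clientset; the factory must still be started and its caches synced before the handler is useful):

```go
package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// registerPVHandler mirrors one arm of addAllEventHandlers: attach a handler
// to an informer and keep the registration so HasSynced can be polled later.
func registerPVHandler(cs *kubernetes.Clientset) (cache.ResourceEventHandlerRegistration, error) {
	factory := informers.NewSharedInformerFactory(cs, 30*time.Second)
	return factory.Core().V1().PersistentVolumes().Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if _, ok := obj.(*v1.PersistentVolume); ok {
					// A new PV may make pending pods schedulable;
					// a scheduler would requeue here.
				}
			},
		})
}
```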
-			if handlerRegistration, err = informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler(
-				buildEvtResHandler(at, framework.PersistentVolumeClaim),
-			); err != nil {
-				return err
-			}
-			handlers = append(handlers, handlerRegistration)
-		case framework.ResourceClaim:
-			if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
-				handlerRegistration = resourceClaimCache.AddEventHandler(
-					buildEvtResHandler(at, framework.ResourceClaim),
-				)
-				handlers = append(handlers, handlerRegistration)
-			}
-		case framework.ResourceSlice:
-			if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
-				if handlerRegistration, err = resourceSliceTracker.AddEventHandler(
-					buildEvtResHandler(at, framework.ResourceSlice),
-				); err != nil {
-					return err
-				}
-				handlers = append(handlers, handlerRegistration)
-			}
-		case framework.DeviceClass:
-			if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
-				if handlerRegistration, err = informerFactory.Resource().V1beta1().DeviceClasses().Informer().AddEventHandler(
-					buildEvtResHandler(at, framework.DeviceClass),
-				); err != nil {
-					return err
-				}
-				handlers = append(handlers, handlerRegistration)
-			}
-		case framework.StorageClass:
-			if handlerRegistration, err = informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler(
-				buildEvtResHandler(at, framework.StorageClass),
-			); err != nil {
-				return err
-			}
-			handlers = append(handlers, handlerRegistration)
-		case framework.VolumeAttachment:
-			if handlerRegistration, err = informerFactory.Storage().V1().VolumeAttachments().Informer().AddEventHandler(
-				buildEvtResHandler(at, framework.VolumeAttachment),
-			); err != nil {
-				return err
-			}
-			handlers = append(handlers, handlerRegistration)
-		default:
-			// Tests may not instantiate dynInformerFactory.
-			if dynInformerFactory == nil {
-				continue
-			}
-			// GVK is expected to be at least 3-folded, separated by dots.
-			// <kind in plural>.<version>.<group>
-			// Valid examples:
-			// - foos.v1.example.com
-			// - bars.v1beta1.a.b.c
-			// Invalid examples:
-			// - foos.v1 (2 sections)
-			// - foo.v1.example.com (the first section should be plural)
-			if strings.Count(string(gvk), ".") < 2 {
-				logger.Error(nil, "incorrect event registration", "gvk", gvk)
-				continue
-			}
-			// Fall back to try dynamic informers.
-			gvr, _ := schema.ParseResourceArg(string(gvk))
-			dynInformer := dynInformerFactory.ForResource(*gvr).Informer()
-			if handlerRegistration, err = dynInformer.AddEventHandler(
-				buildEvtResHandler(at, gvk),
-			); err != nil {
-				return err
-			}
-			handlers = append(handlers, handlerRegistration)
-		}
-	}
-	sched.registeredHandlers = handlers
-	return nil
-}
-
-func preCheckForNode(nodeInfo *framework.NodeInfo) queue.PreEnqueueCheck {
-	if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
-		// QHint was originally created with the motivation of replacing this preCheck.
-		// It assumes that the scheduler only has in-tree plugins, which is problematic for our extensibility.
-		// Here, we skip preCheck if QHint is enabled, and we will eventually remove it after QHint graduates.
-		return nil
-	}
-
-	// Note: the following checks don't take preemption into consideration; in very rare
-	// cases (e.g., node resizing), "pod" may still fail a check but preemption helps. We deliberately
-	// chose to ignore those cases as unschedulable pods will be re-queued eventually.
- return func(pod *v1.Pod) bool { - admissionResults := AdmissionCheck(pod, nodeInfo, false) - if len(admissionResults) != 0 { - return false - } - _, isUntolerated := corev1helpers.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, func(t *v1.Taint) bool { - return t.Effect == v1.TaintEffectNoSchedule - }) - return !isUntolerated - } -} - -// AdmissionCheck calls the filtering logic of noderesources/nodeport/nodeAffinity/nodename -// and returns the failure reasons. It's used in kubelet(pkg/kubelet/lifecycle/predicate.go) and scheduler. -// It returns the first failure if `includeAllFailures` is set to false; otherwise -// returns all failures. -func AdmissionCheck(pod *v1.Pod, nodeInfo *framework.NodeInfo, includeAllFailures bool) []AdmissionResult { - var admissionResults []AdmissionResult - insufficientResources := noderesources.Fits(pod, nodeInfo, noderesources.ResourceRequestsOptions{ - EnablePodLevelResources: utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources), - }) - if len(insufficientResources) != 0 { - for i := range insufficientResources { - admissionResults = append(admissionResults, AdmissionResult{InsufficientResource: &insufficientResources[i]}) - } - if !includeAllFailures { - return admissionResults - } - } - - if matches, _ := corev1nodeaffinity.GetRequiredNodeAffinity(pod).Match(nodeInfo.Node()); !matches { - admissionResults = append(admissionResults, AdmissionResult{Name: nodeaffinity.Name, Reason: nodeaffinity.ErrReasonPod}) - if !includeAllFailures { - return admissionResults - } - } - if !nodename.Fits(pod, nodeInfo) { - admissionResults = append(admissionResults, AdmissionResult{Name: nodename.Name, Reason: nodename.ErrReason}) - if !includeAllFailures { - return admissionResults - } - } - if !nodeports.Fits(pod, nodeInfo) { - admissionResults = append(admissionResults, AdmissionResult{Name: nodeports.Name, Reason: nodeports.ErrReason}) - if !includeAllFailures { - return admissionResults - } - } - return admissionResults -} - -// AdmissionResult describes the reason why Scheduler can't admit the pod. -// If the reason is a resource fit one, then AdmissionResult.InsufficientResource includes the details. -type AdmissionResult struct { - Name string - Reason string - InsufficientResource *noderesources.InsufficientResource -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go deleted file mode 100644 index 2a4866c4df1..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go +++ /dev/null @@ -1,456 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package scheduler - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "net/http" - "strings" - "time" - - v1 "k8s.io/api/core/v1" - utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/sets" - restclient "k8s.io/client-go/rest" - extenderv1 "k8s.io/kube-scheduler/extender/v1" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -const ( - // DefaultExtenderTimeout defines the default extender timeout in second. - DefaultExtenderTimeout = 5 * time.Second -) - -// HTTPExtender implements the Extender interface. -type HTTPExtender struct { - extenderURL string - preemptVerb string - filterVerb string - prioritizeVerb string - bindVerb string - weight int64 - client *http.Client - nodeCacheCapable bool - managedResources sets.Set[string] - ignorable bool -} - -func makeTransport(config *schedulerapi.Extender) (http.RoundTripper, error) { - var cfg restclient.Config - if config.TLSConfig != nil { - cfg.TLSClientConfig.Insecure = config.TLSConfig.Insecure - cfg.TLSClientConfig.ServerName = config.TLSConfig.ServerName - cfg.TLSClientConfig.CertFile = config.TLSConfig.CertFile - cfg.TLSClientConfig.KeyFile = config.TLSConfig.KeyFile - cfg.TLSClientConfig.CAFile = config.TLSConfig.CAFile - cfg.TLSClientConfig.CertData = config.TLSConfig.CertData - cfg.TLSClientConfig.KeyData = config.TLSConfig.KeyData - cfg.TLSClientConfig.CAData = config.TLSConfig.CAData - } - if config.EnableHTTPS { - hasCA := len(cfg.CAFile) > 0 || len(cfg.CAData) > 0 - if !hasCA { - cfg.Insecure = true - } - } - tlsConfig, err := restclient.TLSConfigFor(&cfg) - if err != nil { - return nil, err - } - if tlsConfig != nil { - return utilnet.SetTransportDefaults(&http.Transport{ - TLSClientConfig: tlsConfig, - }), nil - } - return utilnet.SetTransportDefaults(&http.Transport{}), nil -} - -// NewHTTPExtender creates an HTTPExtender object. -func NewHTTPExtender(config *schedulerapi.Extender) (framework.Extender, error) { - if config.HTTPTimeout.Duration.Nanoseconds() == 0 { - config.HTTPTimeout.Duration = time.Duration(DefaultExtenderTimeout) - } - - transport, err := makeTransport(config) - if err != nil { - return nil, err - } - client := &http.Client{ - Transport: transport, - Timeout: config.HTTPTimeout.Duration, - } - managedResources := sets.New[string]() - for _, r := range config.ManagedResources { - managedResources.Insert(string(r.Name)) - } - return &HTTPExtender{ - extenderURL: config.URLPrefix, - preemptVerb: config.PreemptVerb, - filterVerb: config.FilterVerb, - prioritizeVerb: config.PrioritizeVerb, - bindVerb: config.BindVerb, - weight: config.Weight, - client: client, - nodeCacheCapable: config.NodeCacheCapable, - managedResources: managedResources, - ignorable: config.Ignorable, - }, nil -} - -// Name returns extenderURL to identify the extender. -func (h *HTTPExtender) Name() string { - return h.extenderURL -} - -// IsIgnorable returns true indicates scheduling should not fail when this extender -// is unavailable -func (h *HTTPExtender) IsIgnorable() bool { - return h.ignorable -} - -// SupportsPreemption returns true if an extender supports preemption. -// An extender should have preempt verb defined and enabled its own node cache. -func (h *HTTPExtender) SupportsPreemption() bool { - return len(h.preemptVerb) > 0 -} - -// ProcessPreemption returns filtered candidate nodes and victims after running preemption logic in extender. 
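
The HTTPExtender above talks to an external process with plain JSON over HTTP, one verb per endpoint. As a rough sketch of the other side of that contract, a trivial external extender answering the filter verb could look like this (port and path are arbitrary choices, not mandated by the protocol):

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"

	extenderv1 "k8s.io/kube-scheduler/extender/v1"
)

// filter approves every candidate node it is sent; a real extender would
// apply its own predicate here and move rejects into FailedNodes.
func filter(w http.ResponseWriter, r *http.Request) {
	var args extenderv1.ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	result := extenderv1.ExtenderFilterResult{
		Nodes:       args.Nodes,     // populated when nodeCacheCapable=false
		NodeNames:   args.NodeNames, // populated when nodeCacheCapable=true
		FailedNodes: extenderv1.FailedNodesMap{},
	}
	if err := json.NewEncoder(w).Encode(&result); err != nil {
		log.Printf("encode result: %v", err)
	}
}

func main() {
	http.HandleFunc("/filter", filter)
	log.Fatal(http.ListenAndServe(":8888", nil))
}
```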
-func (h *HTTPExtender) ProcessPreemption(
-	pod *v1.Pod,
-	nodeNameToVictims map[string]*extenderv1.Victims,
-	nodeInfos framework.NodeInfoLister,
-) (map[string]*extenderv1.Victims, error) {
-	var (
-		result extenderv1.ExtenderPreemptionResult
-		args   *extenderv1.ExtenderPreemptionArgs
-	)
-
-	if !h.SupportsPreemption() {
-		return nil, fmt.Errorf("preempt verb is not defined for extender %v but run into ProcessPreemption", h.extenderURL)
-	}
-
-	if h.nodeCacheCapable {
-		// If extender has cached node info, pass NodeNameToMetaVictims in args.
-		nodeNameToMetaVictims := convertToMetaVictims(nodeNameToVictims)
-		args = &extenderv1.ExtenderPreemptionArgs{
-			Pod:                   pod,
-			NodeNameToMetaVictims: nodeNameToMetaVictims,
-		}
-	} else {
-		args = &extenderv1.ExtenderPreemptionArgs{
-			Pod:               pod,
-			NodeNameToVictims: nodeNameToVictims,
-		}
-	}
-
-	if err := h.send(h.preemptVerb, args, &result); err != nil {
-		return nil, err
-	}
-
-	// Extender will always return NodeNameToMetaVictims.
-	// So let's convert it to NodeNameToVictims by using <h.convertToVictims>.
-	newNodeNameToVictims, err := h.convertToVictims(result.NodeNameToMetaVictims, nodeInfos)
-	if err != nil {
-		return nil, err
-	}
-	// Do not override <nodeNameToVictims>.
-	return newNodeNameToVictims, nil
-}
-
-// convertToVictims converts "nodeNameToMetaVictims" from object identifiers,
-// such as UIDs and names, to object pointers.
-func (h *HTTPExtender) convertToVictims(
-	nodeNameToMetaVictims map[string]*extenderv1.MetaVictims,
-	nodeInfos framework.NodeInfoLister,
-) (map[string]*extenderv1.Victims, error) {
-	nodeNameToVictims := map[string]*extenderv1.Victims{}
-	for nodeName, metaVictims := range nodeNameToMetaVictims {
-		nodeInfo, err := nodeInfos.Get(nodeName)
-		if err != nil {
-			return nil, err
-		}
-		victims := &extenderv1.Victims{
-			Pods:             []*v1.Pod{},
-			NumPDBViolations: metaVictims.NumPDBViolations,
-		}
-		for _, metaPod := range metaVictims.Pods {
-			pod, err := h.convertPodUIDToPod(metaPod, nodeInfo)
-			if err != nil {
-				return nil, err
-			}
-			victims.Pods = append(victims.Pods, pod)
-		}
-		nodeNameToVictims[nodeName] = victims
-	}
-	return nodeNameToVictims, nil
-}
-
-// convertPodUIDToPod returns v1.Pod object for given MetaPod and node info.
-// The v1.Pod object is restored by nodeInfo.Pods().
-// It returns an error if there's cache inconsistency between default scheduler
-// and extender, i.e. when the pod is not found in nodeInfo.Pods.
-func (h *HTTPExtender) convertPodUIDToPod(
-	metaPod *extenderv1.MetaPod,
-	nodeInfo *framework.NodeInfo) (*v1.Pod, error) {
-	for _, p := range nodeInfo.Pods {
-		if string(p.Pod.UID) == metaPod.UID {
-			return p.Pod, nil
-		}
-	}
-	return nil, fmt.Errorf("extender: %v claims to preempt pod (UID: %v) on node: %v, but the pod is not found on that node",
-		h.extenderURL, metaPod, nodeInfo.Node().Name)
-}
-
-// convertToMetaVictims converts from struct type to meta types.
-func convertToMetaVictims(
-	nodeNameToVictims map[string]*extenderv1.Victims,
-) map[string]*extenderv1.MetaVictims {
-	nodeNameToMetaVictims := map[string]*extenderv1.MetaVictims{}
-	for node, victims := range nodeNameToVictims {
-		metaVictims := &extenderv1.MetaVictims{
-			Pods:             []*extenderv1.MetaPod{},
-			NumPDBViolations: victims.NumPDBViolations,
-		}
-		for _, pod := range victims.Pods {
-			metaPod := &extenderv1.MetaPod{
-				UID: string(pod.UID),
-			}
-			metaVictims.Pods = append(metaVictims.Pods, metaPod)
-		}
-		nodeNameToMetaVictims[node] = metaVictims
-	}
-	return nodeNameToMetaVictims
-}
-
-// Filter based on extender-implemented predicate functions.
The filtered list is -// expected to be a subset of the supplied list; otherwise the function returns an error. -// The failedNodes and failedAndUnresolvableNodes optionally contains the list -// of failed nodes and failure reasons, except nodes in the latter are -// unresolvable. -func (h *HTTPExtender) Filter( - pod *v1.Pod, - nodes []*framework.NodeInfo, -) (filteredList []*framework.NodeInfo, failedNodes, failedAndUnresolvableNodes extenderv1.FailedNodesMap, err error) { - var ( - result extenderv1.ExtenderFilterResult - nodeList *v1.NodeList - nodeNames *[]string - nodeResult []*framework.NodeInfo - args *extenderv1.ExtenderArgs - ) - fromNodeName := make(map[string]*framework.NodeInfo) - for _, n := range nodes { - fromNodeName[n.Node().Name] = n - } - - if h.filterVerb == "" { - return nodes, extenderv1.FailedNodesMap{}, extenderv1.FailedNodesMap{}, nil - } - - if h.nodeCacheCapable { - nodeNameSlice := make([]string, 0, len(nodes)) - for _, node := range nodes { - nodeNameSlice = append(nodeNameSlice, node.Node().Name) - } - nodeNames = &nodeNameSlice - } else { - nodeList = &v1.NodeList{} - for _, node := range nodes { - nodeList.Items = append(nodeList.Items, *node.Node()) - } - } - - args = &extenderv1.ExtenderArgs{ - Pod: pod, - Nodes: nodeList, - NodeNames: nodeNames, - } - - if err := h.send(h.filterVerb, args, &result); err != nil { - return nil, nil, nil, err - } - if result.Error != "" { - return nil, nil, nil, errors.New(result.Error) - } - - if h.nodeCacheCapable && result.NodeNames != nil { - nodeResult = make([]*framework.NodeInfo, len(*result.NodeNames)) - for i, nodeName := range *result.NodeNames { - if n, ok := fromNodeName[nodeName]; ok { - nodeResult[i] = n - } else { - return nil, nil, nil, fmt.Errorf( - "extender %q claims a filtered node %q which is not found in the input node list", - h.extenderURL, nodeName) - } - } - } else if result.Nodes != nil { - nodeResult = make([]*framework.NodeInfo, len(result.Nodes.Items)) - for i := range result.Nodes.Items { - nodeResult[i] = framework.NewNodeInfo() - nodeResult[i].SetNode(&result.Nodes.Items[i]) - } - } - - return nodeResult, result.FailedNodes, result.FailedAndUnresolvableNodes, nil -} - -// Prioritize based on extender implemented priority functions. Weight*priority is added -// up for each such priority function. The returned score is added to the score computed -// by Kubernetes scheduler. The total score is used to do the host selection. 
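
To illustrate the scoring contract documented above before the implementation that follows: the extender returns raw priorities, and the scheduler folds them in as weight times priority on top of its own scores. A toy example with made-up numbers:

```go
package main

import "fmt"

func main() {
	schedulerScores := map[string]int64{"node-a": 60, "node-b": 80}
	extenderScores := map[string]int64{"node-a": 10, "node-b": 2} // raw extender priorities
	const extenderWeight = int64(5)

	// Total score per node = scheduler score + weight * extender priority.
	for node, base := range schedulerScores {
		total := base + extenderWeight*extenderScores[node]
		fmt.Printf("%s: %d\n", node, total) // node-a: 110, node-b: 90
	}
}
```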
-func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*framework.NodeInfo) (*extenderv1.HostPriorityList, int64, error) { - var ( - result extenderv1.HostPriorityList - nodeList *v1.NodeList - nodeNames *[]string - args *extenderv1.ExtenderArgs - ) - - if h.prioritizeVerb == "" { - result := extenderv1.HostPriorityList{} - for _, node := range nodes { - result = append(result, extenderv1.HostPriority{Host: node.Node().Name, Score: 0}) - } - return &result, 0, nil - } - - if h.nodeCacheCapable { - nodeNameSlice := make([]string, 0, len(nodes)) - for _, node := range nodes { - nodeNameSlice = append(nodeNameSlice, node.Node().Name) - } - nodeNames = &nodeNameSlice - } else { - nodeList = &v1.NodeList{} - for _, node := range nodes { - nodeList.Items = append(nodeList.Items, *node.Node()) - } - } - - args = &extenderv1.ExtenderArgs{ - Pod: pod, - Nodes: nodeList, - NodeNames: nodeNames, - } - - if err := h.send(h.prioritizeVerb, args, &result); err != nil { - return nil, 0, err - } - return &result, h.weight, nil -} - -// Bind delegates the action of binding a pod to a node to the extender. -func (h *HTTPExtender) Bind(binding *v1.Binding) error { - var result extenderv1.ExtenderBindingResult - if !h.IsBinder() { - // This shouldn't happen as this extender wouldn't have become a Binder. - return fmt.Errorf("unexpected empty bindVerb in extender") - } - req := &extenderv1.ExtenderBindingArgs{ - PodName: binding.Name, - PodNamespace: binding.Namespace, - PodUID: binding.UID, - Node: binding.Target.Name, - } - if err := h.send(h.bindVerb, req, &result); err != nil { - return err - } - if result.Error != "" { - return errors.New(result.Error) - } - return nil -} - -// IsBinder returns whether this extender is configured for the Bind method. -func (h *HTTPExtender) IsBinder() bool { - return h.bindVerb != "" -} - -// IsPrioritizer returns whether this extender is configured for the Prioritize method. -func (h *HTTPExtender) IsPrioritizer() bool { - return h.prioritizeVerb != "" -} - -// IsFilter returns whether this extender is configured for the Filter method. -func (h *HTTPExtender) IsFilter() bool { - return h.filterVerb != "" -} - -// Helper function to send messages to the extender -func (h *HTTPExtender) send(action string, args interface{}, result interface{}) error { - out, err := json.Marshal(args) - if err != nil { - return err - } - - url := strings.TrimRight(h.extenderURL, "/") + "/" + action - - req, err := http.NewRequest("POST", url, bytes.NewReader(out)) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "application/json") - - resp, err := h.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("failed %v with extender at URL %v, code %v", action, url, resp.StatusCode) - } - - return json.NewDecoder(resp.Body).Decode(result) -} - -// IsInterested returns true if at least one extended resource requested by -// this pod is managed by this extender. 
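
The managed-resources check documented above (implemented by IsInterested just below) reduces to a set-membership test over the pod's requests and limits. A small sketch with a hypothetical extended resource name:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	managed := sets.New[string]("example.com/fpga") // hypothetical resource

	pod := v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
			"example.com/fpga": resource.MustParse("1"),
		}},
	}}}}

	// The extender is consulted only if the pod touches a managed resource.
	interested := false
	for _, c := range pod.Spec.Containers {
		for name := range c.Resources.Requests {
			if managed.Has(string(name)) {
				interested = true
			}
		}
	}
	fmt.Println(interested) // true
}
```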
-func (h *HTTPExtender) IsInterested(pod *v1.Pod) bool { - if h.managedResources.Len() == 0 { - return true - } - if h.hasManagedResources(pod.Spec.Containers) { - return true - } - if h.hasManagedResources(pod.Spec.InitContainers) { - return true - } - return false -} - -func (h *HTTPExtender) hasManagedResources(containers []v1.Container) bool { - for i := range containers { - container := &containers[i] - for resourceName := range container.Resources.Requests { - if h.managedResources.Has(string(resourceName)) { - return true - } - } - for resourceName := range container.Resources.Limits { - if h.managedResources.Has(string(resourceName)) { - return true - } - } - } - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/cycle_state.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/cycle_state.go deleted file mode 100644 index 710e16daf70..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/cycle_state.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "errors" - "sync" - - "k8s.io/apimachinery/pkg/util/sets" -) - -var ( - // ErrNotFound is the not found error message. - ErrNotFound = errors.New("not found") -) - -// StateData is a generic type for arbitrary data stored in CycleState. -type StateData interface { - // Clone is an interface to make a copy of StateData. For performance reasons, - // clone should make shallow copies for members (e.g., slices or maps) that are not - // impacted by PreFilter's optional AddPod/RemovePod methods. - Clone() StateData -} - -// StateKey is the type of keys stored in CycleState. -type StateKey string - -// CycleState provides a mechanism for plugins to store and retrieve arbitrary data. -// StateData stored by one plugin can be read, altered, or deleted by another plugin. -// CycleState does not provide any data protection, as all plugins are assumed to be -// trusted. -// Note: CycleState uses a sync.Map to back the storage, because it is thread safe. It's aimed to optimize for the "write once and read many times" scenarios. -// It is the recommended pattern used in all in-tree plugins - plugin-specific state is written once in PreFilter/PreScore and afterward read many times in Filter/Score. -type CycleState struct { - // storage is keyed with StateKey, and valued with StateData. - storage sync.Map - // if recordPluginMetrics is true, metrics.PluginExecutionDuration will be recorded for this cycle. - recordPluginMetrics bool - // SkipFilterPlugins are plugins that will be skipped in the Filter extension point. - SkipFilterPlugins sets.Set[string] - // SkipScorePlugins are plugins that will be skipped in the Score extension point. - SkipScorePlugins sets.Set[string] -} - -// NewCycleState initializes a new CycleState and returns its pointer. 
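
The intended usage pattern for CycleState, write once in PreFilter and read many times in Filter/Score, looks roughly like this (hypothetical key and state type; the upstream framework package path is assumed):

```go
package sketch

import (
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

const stateKey framework.StateKey = "example.com/precomputed"

type precomputed struct {
	candidateCount int
}

// Clone can be shallow: the struct is never mutated after PreFilter.
func (p *precomputed) Clone() framework.StateData { return p }

// writeState would run once, in a PreFilter implementation.
func writeState(cs *framework.CycleState) {
	cs.Write(stateKey, &precomputed{candidateCount: 3})
}

// readState would run many times, in Filter/Score implementations.
func readState(cs *framework.CycleState) (int, error) {
	data, err := cs.Read(stateKey)
	if err != nil {
		return 0, err // framework.ErrNotFound if PreFilter never ran
	}
	return data.(*precomputed).candidateCount, nil
}
```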
-func NewCycleState() *CycleState { - return &CycleState{} -} - -// ShouldRecordPluginMetrics returns whether metrics.PluginExecutionDuration metrics should be recorded. -func (c *CycleState) ShouldRecordPluginMetrics() bool { - if c == nil { - return false - } - return c.recordPluginMetrics -} - -// SetRecordPluginMetrics sets recordPluginMetrics to the given value. -func (c *CycleState) SetRecordPluginMetrics(flag bool) { - if c == nil { - return - } - c.recordPluginMetrics = flag -} - -// Clone creates a copy of CycleState and returns its pointer. Clone returns -// nil if the context being cloned is nil. -func (c *CycleState) Clone() *CycleState { - if c == nil { - return nil - } - copy := NewCycleState() - // Safe copy storage in case of overwriting. - c.storage.Range(func(k, v interface{}) bool { - copy.storage.Store(k, v.(StateData).Clone()) - return true - }) - // The below are not mutated, so we don't have to safe copy. - copy.recordPluginMetrics = c.recordPluginMetrics - copy.SkipFilterPlugins = c.SkipFilterPlugins - copy.SkipScorePlugins = c.SkipScorePlugins - - return copy -} - -// Read retrieves data with the given "key" from CycleState. If the key is not -// present, ErrNotFound is returned. -// -// See CycleState for notes on concurrency. -func (c *CycleState) Read(key StateKey) (StateData, error) { - if v, ok := c.storage.Load(key); ok { - return v.(StateData), nil - } - return nil, ErrNotFound -} - -// Write stores the given "val" in CycleState with the given "key". -// -// See CycleState for notes on concurrency. -func (c *CycleState) Write(key StateKey, val StateData) { - c.storage.Store(key, val) -} - -// Delete deletes data with the given key from CycleState. -// -// See CycleState for notes on concurrency. -func (c *CycleState) Delete(key StateKey) { - c.storage.Delete(key) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/events.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/events.go deleted file mode 100644 index 1fe1b0b972b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/events.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/component-helpers/resource" - "k8s.io/dynamic-resource-allocation/resourceclaim" - "k8s.io/kubernetes/pkg/features" -) - -// Special event labels. -const ( - // ScheduleAttemptFailure is the event when a schedule attempt fails. - ScheduleAttemptFailure = "ScheduleAttemptFailure" - // BackoffComplete is the event when a pod finishes backoff. - BackoffComplete = "BackoffComplete" - // PopFromBackoffQ is the event when a pod is popped from backoffQ when activeQ is empty. - PopFromBackoffQ = "PopFromBackoffQ" - // ForceActivate is the event when a pod is moved from unschedulablePods/backoffQ - // to activeQ. Usually it's triggered by plugin implementations. 
- ForceActivate = "ForceActivate" - // UnschedulableTimeout is the event when a pod is moved from unschedulablePods - // due to the timeout specified at pod-max-in-unschedulable-pods-duration. - UnschedulableTimeout = "UnschedulableTimeout" -) - -var ( - // EventAssignedPodAdd is the event when an assigned pod is added. - EventAssignedPodAdd = ClusterEvent{Resource: assignedPod, ActionType: Add} - // EventAssignedPodUpdate is the event when an assigned pod is updated. - EventAssignedPodUpdate = ClusterEvent{Resource: assignedPod, ActionType: Update} - // EventAssignedPodDelete is the event when an assigned pod is deleted. - EventAssignedPodDelete = ClusterEvent{Resource: assignedPod, ActionType: Delete} - // EventUnscheduledPodAdd is the event when an unscheduled pod is added. - EventUnscheduledPodAdd = ClusterEvent{Resource: unschedulablePod, ActionType: Add} - // EventUnscheduledPodUpdate is the event when an unscheduled pod is updated. - EventUnscheduledPodUpdate = ClusterEvent{Resource: unschedulablePod, ActionType: Update} - // EventUnscheduledPodDelete is the event when an unscheduled pod is deleted. - EventUnscheduledPodDelete = ClusterEvent{Resource: unschedulablePod, ActionType: Delete} - // EventUnschedulableTimeout is the event when a pod stays in unschedulable for longer than timeout. - EventUnschedulableTimeout = ClusterEvent{Resource: WildCard, ActionType: All, label: UnschedulableTimeout} - // EventForceActivate is the event when a pod is moved from unschedulablePods/backoffQ to activeQ. - EventForceActivate = ClusterEvent{Resource: WildCard, ActionType: All, label: ForceActivate} -) - -// PodSchedulingPropertiesChange interprets the update of a pod and returns corresponding UpdatePodXYZ event(s). -// Once we have other pod update events, we should update here as well. -func PodSchedulingPropertiesChange(newPod *v1.Pod, oldPod *v1.Pod) (events []ClusterEvent) { - r := assignedPod - if newPod.Spec.NodeName == "" { - r = unschedulablePod - } - - podChangeExtracters := []podChangeExtractor{ - extractPodLabelsChange, - extractPodScaleDown, - extractPodSchedulingGateEliminatedChange, - extractPodTolerationChange, - } - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { - podChangeExtracters = append(podChangeExtracters, extractPodGeneratedResourceClaimChange) - } - - for _, fn := range podChangeExtracters { - if event := fn(newPod, oldPod); event != none { - events = append(events, ClusterEvent{Resource: r, ActionType: event}) - } - } - - if len(events) == 0 { - // When no specific event is found, we use AssignedPodOtherUpdate, - // which should only trigger plugins registering a general Pod/Update event. - events = append(events, ClusterEvent{Resource: r, ActionType: updatePodOther}) - } - - return -} - -type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) ActionType - -// extractPodScaleDown interprets the update of a pod and returns PodRequestScaledDown event if any pod's resource request(s) is scaled down. -func extractPodScaleDown(newPod, oldPod *v1.Pod) ActionType { - opt := resource.PodResourcesOptions{ - UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), - } - newPodRequests := resource.PodRequests(newPod, opt) - oldPodRequests := resource.PodRequests(oldPod, opt) - - for rName, oldReq := range oldPodRequests { - newReq, ok := newPodRequests[rName] - if !ok { - // The resource request of rName is removed. 
- return UpdatePodScaleDown - } - - if oldReq.MilliValue() > newReq.MilliValue() { - // The resource request of rName is scaled down. - return UpdatePodScaleDown - } - } - - return none -} - -func extractPodLabelsChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType { - if isLabelChanged(newPod.GetLabels(), oldPod.GetLabels()) { - return UpdatePodLabel - } - return none -} - -func extractPodTolerationChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType { - if len(newPod.Spec.Tolerations) != len(oldPod.Spec.Tolerations) { - // A Pod got a new toleration. - // Due to API validation, the user can add, but cannot modify or remove tolerations. - // So, it's enough to just check the length of tolerations to notice the update. - // And, any updates in tolerations could make Pod schedulable. - return UpdatePodToleration - } - - return none -} - -func extractPodSchedulingGateEliminatedChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType { - if len(newPod.Spec.SchedulingGates) == 0 && len(oldPod.Spec.SchedulingGates) != 0 { - // A scheduling gate on the pod is completely removed. - return UpdatePodSchedulingGatesEliminated - } - - return none -} - -func extractPodGeneratedResourceClaimChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType { - if !resourceclaim.PodStatusEqual(newPod.Status.ResourceClaimStatuses, oldPod.Status.ResourceClaimStatuses) { - return UpdatePodGeneratedResourceClaim - } - - return none -} - -// NodeSchedulingPropertiesChange interprets the update of a node and returns corresponding UpdateNodeXYZ event(s). -func NodeSchedulingPropertiesChange(newNode *v1.Node, oldNode *v1.Node) (events []ClusterEvent) { - nodeChangeExtracters := []nodeChangeExtractor{ - extractNodeSpecUnschedulableChange, - extractNodeAllocatableChange, - extractNodeLabelsChange, - extractNodeTaintsChange, - extractNodeConditionsChange, - extractNodeAnnotationsChange, - } - - for _, fn := range nodeChangeExtracters { - if event := fn(newNode, oldNode); event != none { - events = append(events, ClusterEvent{Resource: Node, ActionType: event}) - } - } - return -} - -type nodeChangeExtractor func(newNode *v1.Node, oldNode *v1.Node) ActionType - -func extractNodeAllocatableChange(newNode *v1.Node, oldNode *v1.Node) ActionType { - if !equality.Semantic.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) { - return UpdateNodeAllocatable - } - return none -} - -func extractNodeLabelsChange(newNode *v1.Node, oldNode *v1.Node) ActionType { - if isLabelChanged(newNode.GetLabels(), oldNode.GetLabels()) { - return UpdateNodeLabel - } - return none -} - -func isLabelChanged(newLabels map[string]string, oldLabels map[string]string) bool { - return !equality.Semantic.DeepEqual(newLabels, oldLabels) -} - -func extractNodeTaintsChange(newNode *v1.Node, oldNode *v1.Node) ActionType { - if !equality.Semantic.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints) { - return UpdateNodeTaint - } - return none -} - -func extractNodeConditionsChange(newNode *v1.Node, oldNode *v1.Node) ActionType { - strip := func(conditions []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus { - conditionStatuses := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conditions)) - for i := range conditions { - conditionStatuses[conditions[i].Type] = conditions[i].Status - } - return conditionStatuses - } - if !equality.Semantic.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions)) { - return UpdateNodeCondition - } - return none -} - -func extractNodeSpecUnschedulableChange(newNode *v1.Node, oldNode *v1.Node) 
ActionType { - if newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable && !newNode.Spec.Unschedulable { - // TODO: create UpdateNodeSpecUnschedulable ActionType - return UpdateNodeTaint - } - return none -} - -func extractNodeAnnotationsChange(newNode *v1.Node, oldNode *v1.Node) ActionType { - if !equality.Semantic.DeepEqual(oldNode.GetAnnotations(), newNode.GetAnnotations()) { - return UpdateNodeAnnotation - } - return none -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/extender.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/extender.go deleted file mode 100644 index b0b937c36b3..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/extender.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - v1 "k8s.io/api/core/v1" - extenderv1 "k8s.io/kube-scheduler/extender/v1" -) - -// Extender is an interface for external processes to influence scheduling -// decisions made by Kubernetes. This is typically needed for resources not directly -// managed by Kubernetes. -type Extender interface { - // Name returns a unique name that identifies the extender. - Name() string - - // Filter based on extender-implemented predicate functions. The filtered list is - // expected to be a subset of the supplied list. - // The failedNodes and failedAndUnresolvableNodes optionally contains the list - // of failed nodes and failure reasons, except nodes in the latter are - // unresolvable. - Filter(pod *v1.Pod, nodes []*NodeInfo) (filteredNodes []*NodeInfo, failedNodesMap extenderv1.FailedNodesMap, failedAndUnresolvable extenderv1.FailedNodesMap, err error) - - // Prioritize based on extender-implemented priority functions. The returned scores & weight - // are used to compute the weighted score for an extender. The weighted scores are added to - // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection. - Prioritize(pod *v1.Pod, nodes []*NodeInfo) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error) - - // Bind delegates the action of binding a pod to a node to the extender. - Bind(binding *v1.Binding) error - - // IsBinder returns whether this extender is configured for the Bind method. - IsBinder() bool - - // IsInterested returns true if at least one extended resource requested by - // this pod is managed by this extender. - IsInterested(pod *v1.Pod) bool - - // IsPrioritizer returns whether this extender is configured for the Prioritize method. - IsPrioritizer() bool - - // IsFilter returns whether this extender is configured for the Filter method. - IsFilter() bool - - // ProcessPreemption returns nodes with their victim pods processed by extender based on - // given: - // 1. Pod to schedule - // 2. Candidate nodes and victim pods (nodeNameToVictims) generated by previous scheduling process. - // The possible changes made by extender may include: - // 1. 
Subset of given candidate nodes after preemption phase of extender.
-	// 2. A different set of victim pods for every given candidate node after preemption phase of extender.
-	ProcessPreemption(
-		pod *v1.Pod,
-		nodeNameToVictims map[string]*extenderv1.Victims,
-		nodeInfos NodeInfoLister,
-	) (map[string]*extenderv1.Victims, error)
-
-	// SupportsPreemption returns whether the scheduler extender supports preemption or not.
-	SupportsPreemption() bool
-
-	// IsIgnorable returns true to indicate that scheduling should not fail when this extender
-	// is unavailable. This gives the scheduler the ability to fail fast and tolerate non-critical extenders as well.
-	// Both Filter and Bind actions are supported.
-	IsIgnorable() bool
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go
deleted file mode 100644
index 10ee4203d7d..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go
+++ /dev/null
@@ -1,954 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This file defines the scheduling framework plugin interfaces.
-
-package framework
-
-import (
-	"context"
-	"errors"
-	"math"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/google/go-cmp/cmp"         //nolint:depguard
-	"github.com/google/go-cmp/cmp/cmpopts" //nolint:depguard
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/client-go/informers"
-	clientset "k8s.io/client-go/kubernetes"
-	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/events"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/apis/config"
-	"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
-)
-
-// NodeScoreList declares a list of nodes and their scores.
-type NodeScoreList []NodeScore
-
-// NodeScore is a struct with node name and score.
-type NodeScore struct {
-	Name  string
-	Score int64
-}
-
-// NodeToStatusReader is a read-only interface of NodeToStatus passed to each PostFilter plugin.
-type NodeToStatusReader interface {
-	// Get returns the status for given nodeName.
-	// If the node is not in the map, the AbsentNodesStatus is returned.
-	Get(nodeName string) *Status
-	// NodesForStatusCode returns a list of NodeInfos for the nodes that have a given status code.
-	// It returns the NodeInfos for all matching nodes denoted by AbsentNodesStatus as well.
-	NodesForStatusCode(nodeLister NodeInfoLister, code Code) ([]*NodeInfo, error)
-}
-
-// NodeToStatusMap is an alias for NodeToStatusReader to keep partial backwards compatibility.
-// NodeToStatusReader should be used if possible.
-type NodeToStatusMap = NodeToStatusReader
-
-// NodeToStatus contains the statuses of the Nodes where the incoming Pod was not schedulable.
-type NodeToStatus struct {
-	// nodeToStatus contains specific statuses of the nodes.
-	nodeToStatus map[string]*Status
-	// absentNodesStatus defines a status for all nodes that are absent in nodeToStatus map.
-	// By default, all absent nodes are UnschedulableAndUnresolvable.
-	absentNodesStatus *Status
-}
-
-// NewDefaultNodeToStatus creates NodeToStatus without any node in the map.
-// The absentNodesStatus is set by default to UnschedulableAndUnresolvable.
-func NewDefaultNodeToStatus() *NodeToStatus {
-	return NewNodeToStatus(make(map[string]*Status), NewStatus(UnschedulableAndUnresolvable))
-}
-
-// NewNodeToStatus creates NodeToStatus initialized with given nodeToStatus and absentNodesStatus.
-func NewNodeToStatus(nodeToStatus map[string]*Status, absentNodesStatus *Status) *NodeToStatus {
-	return &NodeToStatus{
-		nodeToStatus:      nodeToStatus,
-		absentNodesStatus: absentNodesStatus,
-	}
-}
-
-// Get returns the status for given nodeName. If the node is not in the map, the absentNodesStatus is returned.
-func (m *NodeToStatus) Get(nodeName string) *Status {
-	if status, ok := m.nodeToStatus[nodeName]; ok {
-		return status
-	}
-	return m.absentNodesStatus
-}
-
-// Set sets status for given nodeName.
-func (m *NodeToStatus) Set(nodeName string, status *Status) {
-	m.nodeToStatus[nodeName] = status
-}
-
-// Len returns length of nodeToStatus map. It is not aware of number of absent nodes.
-func (m *NodeToStatus) Len() int {
-	return len(m.nodeToStatus)
-}
-
-// AbsentNodesStatus returns absentNodesStatus value.
-func (m *NodeToStatus) AbsentNodesStatus() *Status {
-	return m.absentNodesStatus
-}
-
-// SetAbsentNodesStatus sets absentNodesStatus value.
-func (m *NodeToStatus) SetAbsentNodesStatus(status *Status) {
-	m.absentNodesStatus = status
-}
-
-// ForEachExplicitNode runs fn for each node whose status is explicitly set.
-// Important note: it runs fn only for nodes with a status explicitly registered,
-// and hence may not run fn for all existing nodes.
-// For example, if PreFilter rejects all Nodes, the scheduler would NOT set a failure status to every Node,
-// but set a failure status as AbsentNodesStatus.
-// You're supposed to get a status from AbsentNodesStatus(), and consider all other nodes that are rejected by it.
-func (m *NodeToStatus) ForEachExplicitNode(fn func(nodeName string, status *Status)) {
-	for nodeName, status := range m.nodeToStatus {
-		fn(nodeName, status)
-	}
-}
-
-// NodesForStatusCode returns a list of NodeInfos for the nodes that match a given status code.
-// If the absentNodesStatus matches the code, all existing nodes are fetched using nodeLister
-// and filtered using NodeToStatus.Get.
-// If the absentNodesStatus doesn't match the code, nodeToStatus map is used to create a list of nodes
-// and nodeLister.Get is used to obtain NodeInfo for each.
-func (m *NodeToStatus) NodesForStatusCode(nodeLister NodeInfoLister, code Code) ([]*NodeInfo, error) {
-	var resultNodes []*NodeInfo
-
-	if m.AbsentNodesStatus().Code() == code {
-		allNodes, err := nodeLister.List()
-		if err != nil {
-			return nil, err
-		}
-		if m.Len() == 0 {
-			// All nodes are absent and status code is matching, so can return all nodes.
-			return allNodes, nil
-		}
-		// Need to find all the nodes that are absent or have a matching code using the allNodes.
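
Putting the absent-node semantics together, a short usage sketch (upstream framework package path assumed):

```go
package sketch

import "k8s.io/kubernetes/pkg/scheduler/framework"

func example() {
	// Absent nodes default to UnschedulableAndUnresolvable.
	m := framework.NewDefaultNodeToStatus()
	m.Set("node-a", framework.NewStatus(framework.Unschedulable, "not enough cpu"))

	_ = m.Get("node-a") // the explicit Unschedulable status
	_ = m.Get("node-z") // falls back to the shared absent-nodes status
}
```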
- for _, node := range allNodes { - nodeName := node.Node().Name - if status := m.Get(nodeName); status.Code() == code { - resultNodes = append(resultNodes, node) - } - } - return resultNodes, nil - } - - m.ForEachExplicitNode(func(nodeName string, status *Status) { - if status.Code() == code { - if nodeInfo, err := nodeLister.Get(nodeName); err == nil { - resultNodes = append(resultNodes, nodeInfo) - } - } - }) - - return resultNodes, nil -} - -// NodePluginScores is a struct with node name and scores for that node. -type NodePluginScores struct { - // Name is node name. - Name string - // Scores is scores from plugins and extenders. - Scores []PluginScore - // TotalScore is the total score in Scores. - TotalScore int64 -} - -// PluginScore is a struct with plugin/extender name and score. -type PluginScore struct { - // Name is the name of plugin or extender. - Name string - Score int64 -} - -// Code is the Status code/type which is returned from plugins. -type Code int - -// These are predefined codes used in a Status. -// Note: when you add a new status, you have to add it in `codes` slice below. -const ( - // Success means that plugin ran correctly and found pod schedulable. - // NOTE: A nil status is also considered as "Success". - Success Code = iota - // Error is one of the failures, used for internal plugin errors, unexpected input, etc. - // Plugin shouldn't return this code for expected failures, like Unschedulable. - // Since it's the unexpected failure, the scheduling queue registers the pod without unschedulable plugins. - // Meaning, the Pod will be requeued to activeQ/backoffQ soon. - Error - // Unschedulable is one of the failures, used when a plugin finds a pod unschedulable. - // If it's returned from PreFilter or Filter, the scheduler might attempt to - // run other postFilter plugins like preemption to get this pod scheduled. - // Use UnschedulableAndUnresolvable to make the scheduler skipping other postFilter plugins. - // The accompanying status message should explain why the pod is unschedulable. - // - // We regard the backoff as a penalty of wasting the scheduling cycle. - // When the scheduling queue requeues Pods, which was rejected with Unschedulable in the last scheduling, - // the Pod goes through backoff. - Unschedulable - // UnschedulableAndUnresolvable is used when a plugin finds a pod unschedulable and - // other postFilter plugins like preemption would not change anything. - // See the comment on PostFilter interface for more details about how PostFilter should handle this status. - // Plugins should return Unschedulable if it is possible that the pod can get scheduled - // after running other postFilter plugins. - // The accompanying status message should explain why the pod is unschedulable. - // - // We regard the backoff as a penalty of wasting the scheduling cycle. - // When the scheduling queue requeues Pods, which was rejected with UnschedulableAndUnresolvable in the last scheduling, - // the Pod goes through backoff. - UnschedulableAndUnresolvable - // Wait is used when a Permit plugin finds a pod scheduling should wait. - Wait - // Skip is used in the following scenarios: - // - when a Bind plugin chooses to skip binding. - // - when a PreFilter plugin returns Skip so that coupled Filter plugin/PreFilterExtensions() will be skipped. - // - when a PreScore plugin returns Skip so that coupled Score plugin will be skipped. 
- Skip - // Pending means that the scheduling process is finished successfully, - // but the plugin wants to stop the scheduling cycle/binding cycle here. - // - // For example, if your plugin has to notify the scheduling result to an external component, - // and wait for it to complete something **before** binding. - // It's different from when to return Unschedulable/UnschedulableAndUnresolvable, - // because in this case, the scheduler decides where the Pod can go successfully, - // but we need to wait for the external component to do something based on that scheduling result. - // - // We regard the backoff as a penalty of wasting the scheduling cycle. - // In the case of returning Pending, we cannot say the scheduling cycle is wasted - // because the scheduling result is used to proceed the Pod's scheduling forward, - // that particular scheduling cycle is failed though. - // So, Pods rejected by such reasons don't need to suffer a penalty (backoff). - // When the scheduling queue requeues Pods, which was rejected with Pending in the last scheduling, - // the Pod goes to activeQ directly ignoring backoff. - Pending -) - -// This list should be exactly the same as the codes iota defined above in the same order. -var codes = []string{"Success", "Error", "Unschedulable", "UnschedulableAndUnresolvable", "Wait", "Skip", "Pending"} - -func (c Code) String() string { - return codes[c] -} - -const ( - // MaxNodeScore is the maximum score a Score plugin is expected to return. - MaxNodeScore int64 = 100 - - // MinNodeScore is the minimum score a Score plugin is expected to return. - MinNodeScore int64 = 0 - - // MaxTotalScore is the maximum total score. - MaxTotalScore int64 = math.MaxInt64 -) - -// PodsToActivateKey is a reserved state key for stashing pods. -// If the stashed pods are present in unschedulablePods or backoffQ,they will be -// activated (i.e., moved to activeQ) in two phases: -// - end of a scheduling cycle if it succeeds (will be cleared from `PodsToActivate` if activated) -// - end of a binding cycle if it succeeds -var PodsToActivateKey StateKey = "kubernetes.io/pods-to-activate" - -// PodsToActivate stores pods to be activated. -type PodsToActivate struct { - sync.Mutex - // Map is keyed with namespaced pod name, and valued with the pod. - Map map[string]*v1.Pod -} - -// Clone just returns the same state. -func (s *PodsToActivate) Clone() StateData { - return s -} - -// NewPodsToActivate instantiates a PodsToActivate object. -func NewPodsToActivate() *PodsToActivate { - return &PodsToActivate{Map: make(map[string]*v1.Pod)} -} - -// Status indicates the result of running a plugin. It consists of a code, a -// message, (optionally) an error, and a plugin name it fails by. -// When the status code is not Success, the reasons should explain why. -// And, when code is Success, all the other fields should be empty. -// NOTE: A nil Status is also considered as Success. -type Status struct { - code Code - reasons []string - err error - // plugin is an optional field that records the plugin name causes this status. - // It's set by the framework when code is Unschedulable, UnschedulableAndUnresolvable or Pending. - plugin string -} - -func (s *Status) WithError(err error) *Status { - s.err = err - return s -} - -// Code returns code of the Status. -func (s *Status) Code() Code { - if s == nil { - return Success - } - return s.code -} - -// Message returns a concatenated message on reasons of the Status. 
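
A short sketch of how callers are expected to handle Status values, per the semantics above (upstream framework package path assumed; note that a nil *Status counts as Success):

```go
package sketch

import (
	"errors"

	"k8s.io/kubernetes/pkg/scheduler/framework"
)

func example() error {
	var ok *framework.Status // nil means Success
	if !ok.IsSuccess() {
		return ok.AsError()
	}

	rejected := framework.NewStatus(framework.Unschedulable, "node(s) had taints")
	if rejected.IsRejected() {
		_ = rejected.Message() // "node(s) had taints"
	}

	internal := framework.AsStatus(errors.New("informer cache corrupted"))
	return internal.AsError() // non-nil: the Error code is never swallowed
}
```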
-func (s *Status) Message() string { - if s == nil { - return "" - } - return strings.Join(s.Reasons(), ", ") -} - -// SetPlugin sets the given plugin name to s.plugin. -func (s *Status) SetPlugin(plugin string) { - s.plugin = plugin -} - -// WithPlugin sets the given plugin name to s.plugin, -// and returns the given status object. -func (s *Status) WithPlugin(plugin string) *Status { - s.SetPlugin(plugin) - return s -} - -// Plugin returns the plugin name which caused this status. -func (s *Status) Plugin() string { - return s.plugin -} - -// Reasons returns reasons of the Status. -func (s *Status) Reasons() []string { - if s.err != nil { - return append([]string{s.err.Error()}, s.reasons...) - } - return s.reasons -} - -// AppendReason appends given reason to the Status. -func (s *Status) AppendReason(reason string) { - s.reasons = append(s.reasons, reason) -} - -// IsSuccess returns true if and only if "Status" is nil or Code is "Success". -func (s *Status) IsSuccess() bool { - return s.Code() == Success -} - -// IsWait returns true if and only if "Status" is non-nil and its Code is "Wait". -func (s *Status) IsWait() bool { - return s.Code() == Wait -} - -// IsSkip returns true if and only if "Status" is non-nil and its Code is "Skip". -func (s *Status) IsSkip() bool { - return s.Code() == Skip -} - -// IsRejected returns true if "Status" is Unschedulable (Unschedulable, UnschedulableAndUnresolvable, or Pending). -func (s *Status) IsRejected() bool { - code := s.Code() - return code == Unschedulable || code == UnschedulableAndUnresolvable || code == Pending -} - -// AsError returns nil if the status is a success, a wait or a skip; otherwise returns an "error" object -// with a concatenated message on reasons of the Status. -func (s *Status) AsError() error { - if s.IsSuccess() || s.IsWait() || s.IsSkip() { - return nil - } - if s.err != nil { - return s.err - } - return errors.New(s.Message()) -} - -// Equal checks equality of two statuses. This is useful for testing with -// cmp.Equal. -func (s *Status) Equal(x *Status) bool { - if s == nil || x == nil { - return s.IsSuccess() && x.IsSuccess() - } - if s.code != x.code { - return false - } - if !cmp.Equal(s.err, x.err, cmpopts.EquateErrors()) { - return false - } - if !cmp.Equal(s.reasons, x.reasons) { - return false - } - return cmp.Equal(s.plugin, x.plugin) -} - -func (s *Status) String() string { - return s.Message() -} - -// NewStatus makes a Status out of the given arguments and returns its pointer. -func NewStatus(code Code, reasons ...string) *Status { - s := &Status{ - code: code, - reasons: reasons, - } - return s -} - -// AsStatus wraps an error in a Status. -func AsStatus(err error) *Status { - if err == nil { - return nil - } - return &Status{ - code: Error, - err: err, - } -} - -// WaitingPod represents a pod currently waiting in the permit phase. -type WaitingPod interface { - // GetPod returns a reference to the waiting pod. - GetPod() *v1.Pod - // GetPendingPlugins returns a list of pending Permit plugin's name. - GetPendingPlugins() []string - // Allow declares the waiting pod is allowed to be scheduled by the plugin named as "pluginName". - // If this is the last remaining plugin to allow, then a success signal is delivered - // to unblock the pod. - Allow(pluginName string) - // Reject declares the waiting pod unschedulable. - Reject(pluginName, msg string) -} - -// Plugin is the parent type for all the scheduling framework plugins. 
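
As a concrete instance of the plugin interfaces that follow, a minimal hypothetical PreEnqueue plugin might look like this (upstream framework package path assumed):

```go
package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

type labelGate struct{}

func (labelGate) Name() string { return "LabelGate" }

// PreEnqueue must stay cheap: it runs in the event-handler path before a pod
// may enter activeQ.
func (labelGate) PreEnqueue(_ context.Context, p *v1.Pod) *framework.Status {
	if p.Labels["ready-to-schedule"] != "true" {
		return framework.NewStatus(framework.UnschedulableAndUnresolvable, "pod not marked ready")
	}
	return nil // nil Status is Success
}

var _ framework.PreEnqueuePlugin = labelGate{}
```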
-type Plugin interface {
-	Name() string
-}
-
-// PreEnqueuePlugin is an interface that must be implemented by "PreEnqueue" plugins.
-// These plugins are called prior to adding Pods to activeQ.
-// Note: a PreEnqueue plugin is expected to be lightweight and efficient, so it's not expected to
-// involve expensive calls like accessing external endpoints; otherwise it'd block other
-// Pods' enqueuing in event handlers.
-type PreEnqueuePlugin interface {
-	Plugin
-	// PreEnqueue is called prior to adding Pods to activeQ.
-	PreEnqueue(ctx context.Context, p *v1.Pod) *Status
-}
-
-// LessFunc is the function to sort pod infos.
-type LessFunc func(podInfo1, podInfo2 *QueuedPodInfo) bool
-
-// QueueSortPlugin is an interface that must be implemented by "QueueSort" plugins.
-// These plugins are used to sort pods in the scheduling queue. Only one queue sort
-// plugin may be enabled at a time.
-type QueueSortPlugin interface {
-	Plugin
-	// Less is used to sort pods in the scheduling queue.
-	Less(*QueuedPodInfo, *QueuedPodInfo) bool
-}
-
-// EnqueueExtensions is an optional interface that plugins can implement to efficiently
-// move unschedulable Pods in internal scheduling queues.
-// In the scheduler, Pods can be unschedulable by PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins,
-// and Pods rejected by these plugins are requeued based on this extension point.
-// Failures from other extension points are regarded as temporary errors (e.g., a network failure),
-// and for those the scheduler requeues Pods without this extension point, always moving them to activeQ after backoff.
-// This is because such temporary errors cannot be resolved by specific cluster events,
-// and we have no choice but to keep retrying scheduling until the failure is resolved.
-//
-// Plugins that can make a Pod unschedulable (PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins) should implement this interface;
-// otherwise the default implementation, which is less efficient in requeueing Pods rejected by the plugin, will be used.
-// If plugins at extension points other than the above implement this interface, it is simply ignored.
-type EnqueueExtensions interface {
-	Plugin
-	// EventsToRegister returns a series of possible events that may make a Pod
-	// failed by this plugin schedulable. Each event has a callback function that
-	// filters out events to reduce useless retries of the Pod's scheduling.
-	// The events will be registered when instantiating the internal scheduling queue,
-	// and leveraged to build event handlers dynamically.
-	// When it returns an error, the scheduler fails to start.
-	// Note: the returned list needs to be determined at startup,
-	// and the scheduler only evaluates it once during startup.
-	// Do not change the result during runtime, for example, based on the cluster's state.
-	//
-	// An appropriate implementation of this function will make the Pod's re-scheduling accurate and performant.
-	EventsToRegister(context.Context) ([]ClusterEventWithHint, error)
-}
-
-// PreFilterExtensions is an interface that is included in plugins that allow specifying
-// callbacks to make incremental updates to its supposedly pre-calculated
-// state.
-type PreFilterExtensions interface {
-	// AddPod is called by the framework while trying to evaluate the impact
-	// of adding podToAdd to the node while scheduling podToSchedule.
- AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToAdd *PodInfo, nodeInfo *NodeInfo) *Status - // RemovePod is called by the framework while trying to evaluate the impact - // of removing podToRemove from the node while scheduling podToSchedule. - RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToRemove *PodInfo, nodeInfo *NodeInfo) *Status -} - -// PreFilterPlugin is an interface that must be implemented by "PreFilter" plugins. -// These plugins are called at the beginning of the scheduling cycle. -type PreFilterPlugin interface { - Plugin - // PreFilter is called at the beginning of the scheduling cycle. All PreFilter - // plugins must return success or the pod will be rejected. PreFilter could optionally - // return a PreFilterResult to influence which nodes to evaluate downstream. This is useful - // for cases where it is possible to determine the subset of nodes to process in O(1) time. - // When PreFilterResult filters out some Nodes, the framework considers Nodes that are filtered out as getting "UnschedulableAndUnresolvable". - // i.e., those Nodes will be out of the candidates of the preemption. - // - // When it returns Skip status, returned PreFilterResult and other fields in status are just ignored, - // and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle. - PreFilter(ctx context.Context, state *CycleState, p *v1.Pod) (*PreFilterResult, *Status) - // PreFilterExtensions returns a PreFilterExtensions interface if the plugin implements one, - // or nil if it does not. A Pre-filter plugin can provide extensions to incrementally - // modify its pre-processed info. The framework guarantees that the extensions - // AddPod/RemovePod will only be called after PreFilter, possibly on a cloned - // CycleState, and may call those functions more than once before calling - // Filter again on a specific node. - PreFilterExtensions() PreFilterExtensions -} - -// FilterPlugin is an interface for Filter plugins. These plugins are called at the -// filter extension point for filtering out hosts that cannot run a pod. -// This concept used to be called 'predicate' in the original scheduler. -// These plugins should return "Success", "Unschedulable" or "Error" in Status.code. -// However, the scheduler accepts other valid codes as well. -// Anything other than "Success" will lead to exclusion of the given host from -// running the pod. -type FilterPlugin interface { - Plugin - // Filter is called by the scheduling framework. - // All FilterPlugins should return "Success" to declare that - // the given node fits the pod. If Filter doesn't return "Success", - // it will return "Unschedulable", "UnschedulableAndUnresolvable" or "Error". - // - // "Error" aborts pod scheduling and puts the pod into the backoff queue. - // - // For the node being evaluated, Filter plugins should look at the passed - // nodeInfo reference for this particular node's information (e.g., pods - // considered to be running on the node) instead of looking it up in the - // NodeInfoSnapshot because we don't guarantee that they will be the same. - // For example, during preemption, we may pass a copy of the original - // nodeInfo object that has some pods removed from it to evaluate the - // possibility of preempting them to schedule the target pod. - Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status -} - -// PostFilterPlugin is an interface for "PostFilter" plugins. 
These plugins are called
-// after a pod cannot be scheduled.
-type PostFilterPlugin interface {
-	Plugin
-	// PostFilter is called by the scheduling framework
-	// when the scheduling cycle failed at PreFilter or Filter by Unschedulable or UnschedulableAndUnresolvable.
-	// NodeToStatusReader has the statuses that each Node got in the PreFilter or Filter phase.
-	//
-	// If you're implementing a custom preemption with PostFilter, ignoring Nodes with UnschedulableAndUnresolvable is the responsibility of your plugin,
-	// meaning NodeToStatusReader could have Nodes with UnschedulableAndUnresolvable
-	// and the scheduling framework does call PostFilter plugins even when all Nodes in NodeToStatusReader are UnschedulableAndUnresolvable.
-	//
-	// A PostFilter plugin should return one of the following statuses:
-	// - Unschedulable: the plugin gets executed successfully but the pod cannot be made schedulable.
-	// - Success: the plugin gets executed successfully and the pod can be made schedulable.
-	// - Error: the plugin aborts due to some internal error.
-	//
-	// Informational plugins should be configured ahead of other ones, and always return Unschedulable status.
-	// Optionally, a non-nil PostFilterResult may be returned along with a Success status. For example,
-	// a preemption plugin may choose to return nominatedNodeName, so that the framework can reuse that to update the
-	// preemptor pod's .status.nominatedNodeName field.
-	PostFilter(ctx context.Context, state *CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *Status)
-}
-
-// PreScorePlugin is an interface for "PreScore" plugins. PreScore is an
-// informational extension point. Plugins will be called with a list of nodes
-// that passed the filtering phase. A plugin may use this data to update internal
-// state or to generate logs/metrics.
-type PreScorePlugin interface {
-	Plugin
-	// PreScore is called by the scheduling framework after a list of nodes
-	// passed the filtering phase. All prescore plugins must return success or
-	// the pod will be rejected.
-	// When it returns Skip status, other fields in status are just ignored,
-	// and the coupled Score plugin will be skipped in this scheduling cycle.
-	PreScore(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*NodeInfo) *Status
-}
-
-// ScoreExtensions is an interface for Score extended functionality.
-type ScoreExtensions interface {
-	// NormalizeScore is called for all node scores produced by the same plugin's "Score"
-	// method. A successful run of NormalizeScore will update the scores list and return
-	// a success status.
-	NormalizeScore(ctx context.Context, state *CycleState, p *v1.Pod, scores NodeScoreList) *Status
-}
-
-// ScorePlugin is an interface that must be implemented by "Score" plugins to rank
-// nodes that passed the filtering phase.
-type ScorePlugin interface {
-	Plugin
-	// Score is called on each filtered node. It must return success and an integer
-	// indicating the rank of the node. All scoring plugins must return success or
-	// the pod will be rejected.
-	Score(ctx context.Context, state *CycleState, p *v1.Pod, nodeInfo *NodeInfo) (int64, *Status)
-
-	// ScoreExtensions returns a ScoreExtensions interface if it implements one, or nil if it does not.
-	ScoreExtensions() ScoreExtensions
-}
-
-// ReservePlugin is an interface for plugins with Reserve and Unreserve
-// methods. These are meant to update the state of the plugin. This concept
-// used to be called 'assume' in the original scheduler.
These plugins should
-// return only Success or Error in Status.code. However, the scheduler accepts
-// other valid codes as well. Anything other than Success will lead to
-// rejection of the pod.
-type ReservePlugin interface {
-	Plugin
-	// Reserve is called by the scheduling framework when the scheduler cache is
-	// updated. If this method returns a failed Status, the scheduler will call
-	// the Unreserve method for all enabled ReservePlugins.
-	Reserve(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status
-	// Unreserve is called by the scheduling framework when a reserved pod was
-	// rejected, an error occurred during reservation of subsequent plugins, or
-	// in a later phase. The Unreserve method implementation must be idempotent
-	// and may be called by the scheduler even if the corresponding Reserve
-	// method for the same plugin was not called.
-	Unreserve(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string)
-}
-
-// PreBindPlugin is an interface that must be implemented by "PreBind" plugins.
-// These plugins are called immediately before a pod is bound.
-type PreBindPlugin interface {
-	Plugin
-	// PreBind is called before binding a pod. All prebind plugins must return
-	// success or the pod will be rejected and won't be sent for binding.
-	PreBind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status
-}
-
-// PostBindPlugin is an interface that must be implemented by "PostBind" plugins.
-// These plugins are called after a pod is successfully bound to a node.
-type PostBindPlugin interface {
-	Plugin
-	// PostBind is called after a pod is successfully bound. These plugins are
-	// informational. A common application of this extension point is for cleaning
-	// up. If a plugin needs to clean up its state after a pod is scheduled and
-	// bound, PostBind is the extension point that it should register.
-	PostBind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string)
-}
-
-// PermitPlugin is an interface that must be implemented by "Permit" plugins.
-// These plugins are called before a pod is bound to a node.
-type PermitPlugin interface {
-	Plugin
-	// Permit is called before binding a pod (and before prebind plugins). Permit
-	// plugins are used to prevent or delay the binding of a Pod. A permit plugin
-	// must return success or wait with a timeout duration, or the pod will be rejected.
-	// The pod will also be rejected if the wait times out or if the pod is rejected while
-	// waiting. Note that if the plugin returns "wait", the framework will wait only
-	// after running the remaining plugins, given that no other plugin rejects the pod.
-	Permit(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) (*Status, time.Duration)
-}
-
-// BindPlugin is an interface that must be implemented by "Bind" plugins. Bind
-// plugins are used to bind a pod to a Node.
-type BindPlugin interface {
-	Plugin
-	// Bind plugins will not be called until all pre-bind plugins have completed. Each
-	// bind plugin is called in the configured order. A bind plugin may choose whether
-	// or not to handle the given Pod. If a bind plugin chooses to handle a Pod, the
-	// remaining bind plugins are skipped. When a bind plugin does not handle a pod,
-	// it must return Skip in its Status code. If a bind plugin returns an Error, the
-	// pod is rejected and will not be bound.
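Stepping back from the per-extension interfaces for a moment: to make the Filter contract above concrete, here is a minimal, editorial FilterPlugin sketch. It is not part of the deleted file; the plugin name and the label key are invented, and it relies only on the Filter signature and the NewStatus/Unschedulable identifiers shown in this file.

const exampleFilterName = "RequireSSDLabel"

// requireSSDLabel rejects nodes that do not carry a disktype=ssd label.
type requireSSDLabel struct{}

func (requireSSDLabel) Name() string { return exampleFilterName }

func (requireSSDLabel) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status {
	if nodeInfo.Node().Labels["disktype"] != "ssd" {
		// Unschedulable rather than UnschedulableAndUnresolvable: adding the
		// label to a node later could make the pod schedulable.
		return NewStatus(Unschedulable, "node is missing disktype=ssd")
	}
	return nil // a nil Status is treated as Success
}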
-	Bind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status
-}
-
-// Framework manages the set of plugins in use by the scheduling framework.
-// Configured plugins are called at specified points in a scheduling context.
-type Framework interface {
-	Handle
-
-	// PreEnqueuePlugins returns the registered preEnqueue plugins.
-	PreEnqueuePlugins() []PreEnqueuePlugin
-
-	// EnqueueExtensions returns the registered Enqueue extensions.
-	EnqueueExtensions() []EnqueueExtensions
-
-	// QueueSortFunc returns the function to sort pods in the scheduling queue.
-	QueueSortFunc() LessFunc
-
-	// RunPreFilterPlugins runs the set of configured PreFilter plugins. It returns
-	// *Status and its code is set to non-success if any of the plugins returns
-	// anything but Success. If a non-success status is returned, then the scheduling
-	// cycle is aborted.
-	// It also returns a PreFilterResult, which may influence what or how many nodes to
-	// evaluate downstream.
-	// The third return value contains the PreFilter plugins that rejected some or all Nodes with a PreFilterResult.
-	// Note that it doesn't contain a plugin that rejected this Pod with a non-success status
-	// rather than with a PreFilterResult.
-	RunPreFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod) (*PreFilterResult, *Status, sets.Set[string])
-
-	// RunPostFilterPlugins runs the set of configured PostFilter plugins.
-	// PostFilter plugins can either be informational, in which case they should be configured
-	// to execute first and return Unschedulable status, or ones that try to change the
-	// cluster state to make the pod potentially schedulable in a future scheduling cycle.
-	RunPostFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *Status)
-
-	// RunPreBindPlugins runs the set of configured PreBind plugins. It returns
-	// *Status and its code is set to non-success if any of the plugins returns
-	// anything but Success. If the Status code is "Unschedulable", it is
-	// considered as a scheduling check failure, otherwise, it is considered as an
-	// internal error. In either case the pod is not going to be bound.
-	RunPreBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
-
-	// RunPostBindPlugins runs the set of configured PostBind plugins.
-	RunPostBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string)
-
-	// RunReservePluginsReserve runs the Reserve method of the set of
-	// configured Reserve plugins. If any of these calls returns an error, it
-	// does not continue running the remaining ones and returns the error. In
-	// such a case, the pod will not be scheduled.
-	RunReservePluginsReserve(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
-
-	// RunReservePluginsUnreserve runs the Unreserve method of the set of
-	// configured Reserve plugins.
-	RunReservePluginsUnreserve(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string)
-
-	// RunPermitPlugins runs the set of configured Permit plugins. If any of these
-	// plugins returns a status other than "Success" or "Wait", it does not continue
-	// running the remaining plugins and returns an error. Otherwise, if any of the
-	// plugins returns "Wait", then this function will create and add a waiting pod
-	// to a map of currently waiting pods and return status with "Wait" code.
-	// The Pod will remain a waiting pod for the minimum duration returned by the Permit plugins.
-	RunPermitPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
-
-	// WaitOnPermit will block, if the pod is a waiting pod, until the waiting pod is rejected or allowed.
-	WaitOnPermit(ctx context.Context, pod *v1.Pod) *Status
-
-	// RunBindPlugins runs the set of configured Bind plugins. A Bind plugin may choose
-	// whether or not to handle the given Pod. If a Bind plugin chooses to skip the
-	// binding, it should return code=5("skip") status. Otherwise, it should return "Error"
-	// or "Success". If none of the plugins handled binding, RunBindPlugins returns
-	// code=5("skip") status.
-	RunBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
-
-	// HasFilterPlugins returns true if at least one Filter plugin is defined.
-	HasFilterPlugins() bool
-
-	// HasPostFilterPlugins returns true if at least one PostFilter plugin is defined.
-	HasPostFilterPlugins() bool
-
-	// HasScorePlugins returns true if at least one Score plugin is defined.
-	HasScorePlugins() bool
-
-	// ListPlugins returns a map of extension point name to list of configured Plugins.
-	ListPlugins() *config.Plugins
-
-	// ProfileName returns the profile name associated with this profile.
-	ProfileName() string
-
-	// PercentageOfNodesToScore returns the percentageOfNodesToScore associated with this profile.
-	PercentageOfNodesToScore() *int32
-
-	// SetPodNominator sets the PodNominator
-	SetPodNominator(nominator PodNominator)
-	// SetPodActivator sets the PodActivator
-	SetPodActivator(activator PodActivator)
-
-	// Close calls the Close method of each plugin.
-	Close() error
-}
-
-// Handle provides data and some tools that plugins can use. It is
-// passed to the plugin factories at the time of plugin initialization. Plugins
-// must store and use this handle to call framework functions.
-type Handle interface {
-	// PodNominator abstracts operations to maintain nominated Pods.
-	PodNominator
-	// PluginsRunner abstracts operations to run some plugins.
-	PluginsRunner
-	// PodActivator abstracts operations in the scheduling queue.
-	PodActivator
-	// SnapshotSharedLister returns listers from the latest NodeInfo Snapshot. The snapshot
-	// is taken at the beginning of a scheduling cycle and remains unchanged until
-	// the pod finishes the "Permit" point.
-	//
-	// It should be used only during the scheduling cycle:
-	// - There is no guarantee that the information remains unchanged in the binding phase of scheduling.
-	//   So, plugins shouldn't use it in the binding cycle (pre-bind/bind/post-bind/un-reserve plugins);
-	//   otherwise, a concurrent read/write error might occur.
-	// - There is no guarantee that the information is always up-to-date.
-	//   So, plugins shouldn't use it in QueueingHint and PreEnqueue;
-	//   otherwise, they might make a decision based on stale information.
-	//
-	// Instead, they should use the resources obtained from the Informers created from SharedInformerFactory().
-	SnapshotSharedLister() SharedLister
-
-	// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map.
-	IterateOverWaitingPods(callback func(WaitingPod))
-
-	// GetWaitingPod returns a waiting pod given its UID.
-	GetWaitingPod(uid types.UID) WaitingPod
-
-	// RejectWaitingPod rejects a waiting pod given its UID.
-	// The return value indicates if the pod is waiting or not.
-	RejectWaitingPod(uid types.UID) bool
-
-	// ClientSet returns a kubernetes clientSet.
-	ClientSet() clientset.Interface
-
-	// KubeConfig returns the raw kube config.
-	KubeConfig() *restclient.Config
-
-	// EventRecorder returns an event recorder.
-	EventRecorder() events.EventRecorder
-
-	SharedInformerFactory() informers.SharedInformerFactory
-
-	// SharedDRAManager can be used to obtain DRA objects, and track modifications to them in-memory - mainly by the DRA plugin.
-	// A non-default implementation can be plugged into the framework to simulate the state of DRA objects.
-	SharedDRAManager() SharedDRAManager
-
-	// RunFilterPluginsWithNominatedPods runs the set of configured filter plugins for the nominated pod on the given node.
-	RunFilterPluginsWithNominatedPods(ctx context.Context, state *CycleState, pod *v1.Pod, info *NodeInfo) *Status
-
-	// Extenders returns the registered scheduler extenders.
-	Extenders() []Extender
-
-	// Parallelizer returns a parallelizer holding the parallelism for the scheduler.
-	Parallelizer() parallelize.Parallelizer
-}
-
-// PreFilterResult wraps needed info for the scheduler framework to act upon the PreFilter phase.
-type PreFilterResult struct {
-	// The set of nodes that should be considered downstream; if nil then
-	// all nodes are eligible.
-	NodeNames sets.Set[string]
-}
-
-func (p *PreFilterResult) AllNodes() bool {
-	return p == nil || p.NodeNames == nil
-}
-
-func (p *PreFilterResult) Merge(in *PreFilterResult) *PreFilterResult {
-	if p.AllNodes() && in.AllNodes() {
-		return nil
-	}
-
-	r := PreFilterResult{}
-	if p.AllNodes() {
-		r.NodeNames = in.NodeNames.Clone()
-		return &r
-	}
-	if in.AllNodes() {
-		r.NodeNames = p.NodeNames.Clone()
-		return &r
-	}
-
-	r.NodeNames = p.NodeNames.Intersection(in.NodeNames)
-	return &r
-}
-
-type NominatingMode int
-
-const (
-	ModeNoop NominatingMode = iota
-	ModeOverride
-)
-
-type NominatingInfo struct {
-	NominatedNodeName string
-	NominatingMode    NominatingMode
-}
-
-// PostFilterResult wraps needed info for the scheduler framework to act upon the PostFilter phase.
-type PostFilterResult struct {
-	*NominatingInfo
-}
-
-func NewPostFilterResultWithNominatedNode(name string) *PostFilterResult {
-	return &PostFilterResult{
-		NominatingInfo: &NominatingInfo{
-			NominatedNodeName: name,
-			NominatingMode:    ModeOverride,
-		},
-	}
-}
-
-func (ni *NominatingInfo) Mode() NominatingMode {
-	if ni == nil {
-		return ModeNoop
-	}
-	return ni.NominatingMode
-}
-
-// PodActivator abstracts operations in the scheduling queue.
-type PodActivator interface {
-	// Activate moves the given pods to activeQ.
-	// If a pod isn't found in unschedulablePods or backoffQ and it's in-flight,
-	// the wildcard event is registered so that the pod will be requeued when it comes back.
-	// But, if a pod isn't found in unschedulablePods or backoffQ and it's not in-flight (i.e., a completely unknown pod),
-	// Activate would ignore the pod.
-	Activate(logger klog.Logger, pods map[string]*v1.Pod)
-}
-
-// PodNominator abstracts operations to maintain nominated Pods.
-type PodNominator interface {
-	// AddNominatedPod adds the given pod to the nominator or
-	// updates it if it already exists.
-	AddNominatedPod(logger klog.Logger, pod *PodInfo, nominatingInfo *NominatingInfo)
-	// DeleteNominatedPodIfExists deletes nominatedPod from the internal cache. It's a no-op if it doesn't exist.
-	DeleteNominatedPodIfExists(pod *v1.Pod)
-	// UpdateNominatedPod updates the nominated pod identified by oldPod with newPodInfo.
-	UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, newPodInfo *PodInfo)
-	// NominatedPodsForNode returns the nominated Pods on the given node.
-	NominatedPodsForNode(nodeName string) []*PodInfo
-}
-
-// PluginsRunner abstracts operations to run some plugins.
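Before continuing, a worked, editorial example of the Merge semantics above (the node names are invented; sets comes from the k8s.io/apimachinery/pkg/util/sets import this file already uses): a nil result or nil NodeNames means "all nodes", and two concrete results intersect.

func mergePreFilterResults() {
	a := &PreFilterResult{NodeNames: sets.New("node-a", "node-b")}
	b := &PreFilterResult{NodeNames: sets.New("node-b", "node-c")}

	merged := a.Merge(b) // NodeNames == {"node-b"}: the intersection of the two sets
	one := a.Merge(nil)  // clone of a's set: the nil side allows all nodes

	var unconstrained *PreFilterResult
	all := unconstrained.Merge(nil) // nil result: both sides allow all nodes

	_, _, _ = merged, one, all
}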
-// This is used by preemption PostFilter plugins when evaluating the feasibility of -// scheduling the pod on nodes when certain running pods get evicted. -type PluginsRunner interface { - // RunPreScorePlugins runs the set of configured PreScore plugins. If any - // of these plugins returns any status other than "Success", the given pod is rejected. - RunPreScorePlugins(context.Context, *CycleState, *v1.Pod, []*NodeInfo) *Status - // RunScorePlugins runs the set of configured scoring plugins. - // It returns a list that stores scores from each plugin and total score for each Node. - // It also returns *Status, which is set to non-success if any of the plugins returns - // a non-success status. - RunScorePlugins(context.Context, *CycleState, *v1.Pod, []*NodeInfo) ([]NodePluginScores, *Status) - // RunFilterPlugins runs the set of configured Filter plugins for pod on - // the given node. Note that for the node being evaluated, the passed nodeInfo - // reference could be different from the one in NodeInfoSnapshot map (e.g., pods - // considered to be running on the node could be different). For example, during - // preemption, we may pass a copy of the original nodeInfo object that has some pods - // removed from it to evaluate the possibility of preempting them to - // schedule the target pod. - RunFilterPlugins(context.Context, *CycleState, *v1.Pod, *NodeInfo) *Status - // RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured - // PreFilter plugins. It returns directly if any of the plugins return any - // status other than Success. - RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToAdd *PodInfo, nodeInfo *NodeInfo) *Status - // RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured - // PreFilter plugins. It returns directly if any of the plugins return any - // status other than Success. - RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToRemove *PodInfo, nodeInfo *NodeInfo) *Status -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/listers.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/listers.go deleted file mode 100644 index e56266ba1c6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/listers.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - resourceapi "k8s.io/api/resource/v1beta1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/dynamic-resource-allocation/structured" -) - -// NodeInfoLister interface represents anything that can list/get NodeInfo objects from node name. -type NodeInfoLister interface { - // List returns the list of NodeInfos. - List() ([]*NodeInfo, error) - // HavePodsWithAffinityList returns the list of NodeInfos of nodes with pods with affinity terms. 
- HavePodsWithAffinityList() ([]*NodeInfo, error) - // HavePodsWithRequiredAntiAffinityList returns the list of NodeInfos of nodes with pods with required anti-affinity terms. - HavePodsWithRequiredAntiAffinityList() ([]*NodeInfo, error) - // Get returns the NodeInfo of the given node name. - Get(nodeName string) (*NodeInfo, error) -} - -// StorageInfoLister interface represents anything that handles storage-related operations and resources. -type StorageInfoLister interface { - // IsPVCUsedByPods returns true/false on whether the PVC is used by one or more scheduled pods, - // keyed in the format "namespace/name". - IsPVCUsedByPods(key string) bool -} - -// SharedLister groups scheduler-specific listers. -type SharedLister interface { - NodeInfos() NodeInfoLister - StorageInfos() StorageInfoLister -} - -// ResourceSliceLister can be used to obtain ResourceSlices. -type ResourceSliceLister interface { - // ListWithDeviceTaintRules returns a list of all ResourceSlices with DeviceTaintRules applied - // if the DRADeviceTaints feature is enabled, otherwise without them. - // - // k8s.io/dynamic-resource-allocation/resourceslice/tracker provides an implementation - // of the necessary logic. That tracker can be instantiated as a replacement for - // a normal ResourceSlice informer and provides a ListPatchedResourceSlices method. - ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) -} - -// DeviceClassLister can be used to obtain DeviceClasses. -type DeviceClassLister interface { - // List returns a list of all DeviceClasses. - List() ([]*resourceapi.DeviceClass, error) - // Get returns the DeviceClass with the given className. - Get(className string) (*resourceapi.DeviceClass, error) -} - -// ResourceClaimTracker can be used to obtain ResourceClaims, and track changes to ResourceClaims in-memory. -// -// If the claims are meant to be allocated in the API during the binding phase (when used by scheduler), the tracker helps avoid -// race conditions between scheduling and binding phases (as well as between the binding phase and the informer cache update). -// -// If the binding phase is not run (e.g. when used by Cluster Autoscaler which only runs the scheduling phase, and simulates binding in-memory), -// the tracker allows the framework user to obtain the claim allocations produced by the DRA plugin, and persist them outside of the API (e.g. in-memory). -type ResourceClaimTracker interface { - // List lists ResourceClaims. The result is guaranteed to immediately include any changes made via AssumeClaimAfterAPICall(), - // and SignalClaimPendingAllocation(). - List() ([]*resourceapi.ResourceClaim, error) - // Get works like List(), but for a single claim. - Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) - // ListAllAllocatedDevices lists all allocated Devices from allocated ResourceClaims. The result is guaranteed to immediately include - // any changes made via AssumeClaimAfterAPICall(), and SignalClaimPendingAllocation(). - ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) - - // SignalClaimPendingAllocation signals to the tracker that the given ResourceClaim will be allocated via an API call in the - // binding phase. This change is immediately reflected in the result of List() and the other accessors. - SignalClaimPendingAllocation(claimUID types.UID, allocatedClaim *resourceapi.ResourceClaim) error - // ClaimHasPendingAllocation answers whether a given claim has a pending allocation during the binding phase. 
It can be used to avoid - // race conditions in subsequent scheduling phases. - ClaimHasPendingAllocation(claimUID types.UID) bool - // RemoveClaimPendingAllocation removes the pending allocation for the given ResourceClaim from the tracker if any was signaled via - // SignalClaimPendingAllocation(). Returns whether there was a pending allocation to remove. List() and the other accessors immediately - // stop reflecting the pending allocation in the results. - RemoveClaimPendingAllocation(claimUID types.UID) (deleted bool) - - // AssumeClaimAfterAPICall signals to the tracker that an API call modifying the given ResourceClaim was made in the binding phase, and the - // changes should be reflected in informers very soon. This change is immediately reflected in the result of List() and the other accessors. - // This mechanism can be used to avoid race conditions between the informer update and subsequent scheduling phases. - AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error - // AssumedClaimRestore signals to the tracker that something went wrong with the API call modifying the given ResourceClaim, and - // the changes won't be reflected in informers after all. List() and the other accessors immediately stop reflecting the assumed change, - // and go back to the informer version. - AssumedClaimRestore(namespace, claimName string) -} - -// SharedDRAManager can be used to obtain DRA objects, and track modifications to them in-memory - mainly by the DRA plugin. -// The plugin's default implementation obtains the objects from the API. A different implementation can be -// plugged into the framework in order to simulate the state of DRA objects. For example, Cluster Autoscaler -// can use this to provide the correct DRA object state to the DRA plugin when simulating scheduling changes in-memory. -type SharedDRAManager interface { - ResourceClaims() ResourceClaimTracker - ResourceSlices() ResourceSliceLister - DeviceClasses() DeviceClassLister -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/parallelize/error_channel.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/parallelize/error_channel.go deleted file mode 100644 index 2eff825bae8..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/parallelize/error_channel.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parallelize - -import "context" - -// ErrorChannel supports non-blocking send and receive operation to capture error. -// A maximum of one error is kept in the channel and the rest of the errors sent -// are ignored, unless the existing error is received and the channel becomes empty -// again. -type ErrorChannel struct { - errCh chan error -} - -// SendError sends an error without blocking the sender. -func (e *ErrorChannel) SendError(err error) { - select { - case e.errCh <- err: - default: - } -} - -// SendErrorWithCancel sends an error without blocking the sender and calls -// cancel function. 
-func (e *ErrorChannel) SendErrorWithCancel(err error, cancel context.CancelFunc) { - e.SendError(err) - cancel() -} - -// ReceiveError receives an error from channel without blocking on the receiver. -func (e *ErrorChannel) ReceiveError() error { - select { - case err := <-e.errCh: - return err - default: - return nil - } -} - -// NewErrorChannel returns a new ErrorChannel. -func NewErrorChannel() *ErrorChannel { - return &ErrorChannel{ - errCh: make(chan error, 1), - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/parallelize/parallelism.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/parallelize/parallelism.go deleted file mode 100644 index 2ac14289a0c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/parallelize/parallelism.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parallelize - -import ( - "context" - "math" - - "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/scheduler/metrics" -) - -// DefaultParallelism is the default parallelism used in scheduler. -const DefaultParallelism int = 16 - -// Parallelizer holds the parallelism for scheduler. -type Parallelizer struct { - parallelism int -} - -// NewParallelizer returns an object holding the parallelism. -func NewParallelizer(p int) Parallelizer { - return Parallelizer{parallelism: p} -} - -// chunkSizeFor returns a chunk size for the given number of items to use for -// parallel work. The size aims to produce good CPU utilization. -// returns max(1, min(sqrt(n), n/Parallelism)) -func chunkSizeFor(n, parallelism int) int { - s := int(math.Sqrt(float64(n))) - - if r := n/parallelism + 1; s > r { - s = r - } else if s < 1 { - s = 1 - } - return s -} - -// numWorkersForChunkSize returns number of workers (goroutines) -// that will be created in workqueue.ParallelizeUntil -// for given parallelism, pieces and chunkSize values. -func numWorkersForChunkSize(parallelism, pieces, chunkSize int) int { - chunks := (pieces + chunkSize - 1) / chunkSize - if chunks < parallelism { - return chunks - } - return parallelism -} - -// Until is a wrapper around workqueue.ParallelizeUntil to use in scheduling algorithms. -// A given operation will be a label that is recorded in the goroutine metric. -func (p Parallelizer) Until(ctx context.Context, pieces int, doWorkPiece workqueue.DoWorkPieceFunc, operation string) { - chunkSize := chunkSizeFor(pieces, p.parallelism) - workers := numWorkersForChunkSize(p.parallelism, pieces, chunkSize) - - goroutinesMetric := metrics.Goroutines.WithLabelValues(operation) - // Calling single Add with workers' count is more efficient than calling Inc or Dec per each work piece. - // This approach improves performance of some plugins (affinity, topology spreading) as well as preemption. 
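To see how ErrorChannel and Parallelizer.Until combine in practice, here is an editorial sketch that is not part of the deleted files; checkNode is a caller-supplied stand-in for real per-node work. With pieces=1024 and the default parallelism of 16, chunkSizeFor picks min(sqrt(1024), 1024/16+1) = 32, so 16 workers process 32-piece chunks; the first error cancels the remaining work.

func checkAllNodes(ctx context.Context, p Parallelizer, nodes []string, checkNode func(string) error) error {
	errCh := NewErrorChannel()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	p.Until(ctx, len(nodes), func(i int) {
		if err := checkNode(nodes[i]); err != nil {
			// Only the first error is kept; cancel stops the remaining chunks early.
			errCh.SendErrorWithCancel(err, cancel)
		}
	}, "exampleCheckNodes")
	return errCh.ReceiveError()
}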
- goroutinesMetric.Add(float64(workers)) - defer goroutinesMetric.Add(float64(-workers)) - - workqueue.ParallelizeUntil(ctx, p.parallelism, pieces, doWorkPiece, workqueue.WithChunkSize(chunkSize)) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/README.md b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/README.md deleted file mode 100644 index 6fba8bb849b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Scheduler Framework Plugins - -Moved [here](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-scheduling/scheduler_framework_plugins.md). diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder/default_binder.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder/default_binder.go deleted file mode 100644 index aa1ed6a6943..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder/default_binder.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package defaultbinder - -import ( - "context" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" -) - -// Name of the plugin used in the plugin registry and configurations. -const Name = names.DefaultBinder - -// DefaultBinder binds pods to nodes using a k8s client. -type DefaultBinder struct { - handle framework.Handle -} - -var _ framework.BindPlugin = &DefaultBinder{} - -// New creates a DefaultBinder. -func New(_ context.Context, _ runtime.Object, handle framework.Handle) (framework.Plugin, error) { - return &DefaultBinder{handle: handle}, nil -} - -// Name returns the name of the plugin. -func (b DefaultBinder) Name() string { - return Name -} - -// Bind binds pods to nodes using the k8s client. 
-func (b DefaultBinder) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
-	logger := klog.FromContext(ctx)
-	logger.V(3).Info("Attempting to bind pod to node", "pod", klog.KObj(p), "node", klog.KRef("", nodeName))
-	binding := &v1.Binding{
-		ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID},
-		Target:     v1.ObjectReference{Kind: "Node", Name: nodeName},
-	}
-	err := b.handle.ClientSet().CoreV1().Pods(binding.Namespace).Bind(ctx, binding, metav1.CreateOptions{})
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	return nil
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go
deleted file mode 100644
index c36528530f9..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package defaultpreemption
-
-import (
-	"context"
-	"fmt"
-	"math/rand"
-	"sort"
-
-	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/informers"
-	corelisters "k8s.io/client-go/listers/core/v1"
-	policylisters "k8s.io/client-go/listers/policy/v1"
-	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
-	"k8s.io/klog/v2"
-	extenderv1 "k8s.io/kube-scheduler/extender/v1"
-	"k8s.io/kubernetes/pkg/scheduler/apis/config"
-	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/kubernetes/pkg/scheduler/framework/preemption"
-	"k8s.io/kubernetes/pkg/scheduler/metrics"
-	"k8s.io/kubernetes/pkg/scheduler/util"
-)
-
-// Name of the plugin used in the plugin registry and configurations.
-const Name = names.DefaultPreemption
-
-// DefaultPreemption is a PostFilter plugin that implements the preemption logic.
-type DefaultPreemption struct {
-	fh        framework.Handle
-	fts       feature.Features
-	args      config.DefaultPreemptionArgs
-	podLister corelisters.PodLister
-	pdbLister policylisters.PodDisruptionBudgetLister
-	Evaluator *preemption.Evaluator
-}
-
-var _ framework.PostFilterPlugin = &DefaultPreemption{}
-var _ framework.PreEnqueuePlugin = &DefaultPreemption{}
-
-// Name returns the name of the plugin. It is used in logs, etc.
-func (pl *DefaultPreemption) Name() string {
-	return Name
-}
-
-// New initializes a new plugin and returns it.
-func New(_ context.Context, dpArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) {
-	args, ok := dpArgs.(*config.DefaultPreemptionArgs)
-	if !ok {
-		return nil, fmt.Errorf("got args of type %T, want *DefaultPreemptionArgs", dpArgs)
-	}
-	if err := validation.ValidateDefaultPreemptionArgs(nil, args); err != nil {
-		return nil, err
-	}
-
-	podLister := fh.SharedInformerFactory().Core().V1().Pods().Lister()
-	pdbLister := getPDBLister(fh.SharedInformerFactory())
-
-	pl := DefaultPreemption{
-		fh:        fh,
-		fts:       fts,
-		args:      *args,
-		podLister: podLister,
-		pdbLister: pdbLister,
-	}
-	pl.Evaluator = preemption.NewEvaluator(Name, fh, &pl, fts.EnableAsyncPreemption)
-
-	return &pl, nil
-}
-
-// PostFilter is invoked at the postFilter extension point.
-func (pl *DefaultPreemption) PostFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
-	defer func() {
-		metrics.PreemptionAttempts.Inc()
-	}()
-
-	result, status := pl.Evaluator.Preempt(ctx, state, pod, m)
-	msg := status.Message()
-	if len(msg) > 0 {
-		return result, framework.NewStatus(status.Code(), "preemption: "+msg)
-	}
-	return result, status
-}
-
-func (pl *DefaultPreemption) PreEnqueue(ctx context.Context, p *v1.Pod) *framework.Status {
-	if !pl.fts.EnableAsyncPreemption {
-		return nil
-	}
-	if pl.Evaluator.IsPodRunningPreemption(p.GetUID()) {
-		return framework.NewStatus(framework.UnschedulableAndUnresolvable, "waiting for the preemption for this pod to be finished")
-	}
-	return nil
-}
-
-// EventsToRegister returns the possible events that may make a Pod
-// failed by this plugin schedulable.
-func (pl *DefaultPreemption) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) {
-	// The plugin moves the preemptor Pod to activeQ/backoffQ once the preemption API calls are all done,
-	// and we don't need to move the Pod with any events.
-	return nil, nil
-}
-
-// calculateNumCandidates returns the number of candidates the FindCandidates
-// method must produce from dry running based on the constraints given by
-// minCandidateNodesPercentage and minCandidateNodesAbsolute. The number of
-// candidates returned will never be greater than numNodes.
-func (pl *DefaultPreemption) calculateNumCandidates(numNodes int32) int32 {
-	n := (numNodes * pl.args.MinCandidateNodesPercentage) / 100
-	if n < pl.args.MinCandidateNodesAbsolute {
-		n = pl.args.MinCandidateNodesAbsolute
-	}
-	if n > numNodes {
-		n = numNodes
-	}
-	return n
-}
-
-// getOffsetRand is a dedicated random source for GetOffsetAndNumCandidates calls.
-// It defaults to rand.Int31n, but is a package variable so it can be overridden to make unit tests deterministic.
-var getOffsetRand = rand.Int31n
-
-// GetOffsetAndNumCandidates chooses a random offset and calculates the number
-// of candidates that should be shortlisted for dry running preemption.
-func (pl *DefaultPreemption) GetOffsetAndNumCandidates(numNodes int32) (int32, int32) {
-	return getOffsetRand(numNodes), pl.calculateNumCandidates(numNodes)
-}
-
-// This function is not applicable for out-of-tree preemption plugins that exercise
-// different preemption candidates on the same nominated node.
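As a worked illustration of calculateNumCandidates above (assuming the upstream defaults of MinCandidateNodesPercentage=10 and MinCandidateNodesAbsolute=100; check the configured DefaultPreemptionArgs before relying on these numbers): for 5000 feasible nodes, 10% yields 500 candidates; for 200 nodes, 10% yields 20, which is raised to the floor of 100; for 50 nodes, the floor of 100 is capped back down to 50. GetOffsetAndNumCandidates then pairs that count with a random starting offset so repeated preemption attempts don't always dry-run the same subset of nodes.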
-func (pl *DefaultPreemption) CandidatesToVictimsMap(candidates []preemption.Candidate) map[string]*extenderv1.Victims { - m := make(map[string]*extenderv1.Victims, len(candidates)) - for _, c := range candidates { - m[c.Name()] = c.Victims() - } - return m -} - -// SelectVictimsOnNode finds minimum set of pods on the given node that should be preempted in order to make enough room -// for "pod" to be scheduled. -func (pl *DefaultPreemption) SelectVictimsOnNode( - ctx context.Context, - state *framework.CycleState, - pod *v1.Pod, - nodeInfo *framework.NodeInfo, - pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *framework.Status) { - logger := klog.FromContext(ctx) - var potentialVictims []*framework.PodInfo - removePod := func(rpi *framework.PodInfo) error { - if err := nodeInfo.RemovePod(logger, rpi.Pod); err != nil { - return err - } - status := pl.fh.RunPreFilterExtensionRemovePod(ctx, state, pod, rpi, nodeInfo) - if !status.IsSuccess() { - return status.AsError() - } - return nil - } - addPod := func(api *framework.PodInfo) error { - nodeInfo.AddPodInfo(api) - status := pl.fh.RunPreFilterExtensionAddPod(ctx, state, pod, api, nodeInfo) - if !status.IsSuccess() { - return status.AsError() - } - return nil - } - // As the first step, remove all the lower priority pods from the node and - // check if the given pod can be scheduled. - podPriority := corev1helpers.PodPriority(pod) - for _, pi := range nodeInfo.Pods { - if corev1helpers.PodPriority(pi.Pod) < podPriority { - potentialVictims = append(potentialVictims, pi) - if err := removePod(pi); err != nil { - return nil, 0, framework.AsStatus(err) - } - } - } - - // No potential victims are found, and so we don't need to evaluate the node again since its state didn't change. - if len(potentialVictims) == 0 { - return nil, 0, framework.NewStatus(framework.UnschedulableAndUnresolvable, "No preemption victims found for incoming pod") - } - - // If the new pod does not fit after removing all the lower priority pods, - // we are almost done and this node is not suitable for preemption. The only - // condition that we could check is if the "pod" is failing to schedule due to - // inter-pod affinity to one or more victims, but we have decided not to - // support this case for performance reasons. Having affinity to lower - // priority pods is not a recommended configuration anyway. - if status := pl.fh.RunFilterPluginsWithNominatedPods(ctx, state, pod, nodeInfo); !status.IsSuccess() { - return nil, 0, status - } - var victims []*v1.Pod - numViolatingVictim := 0 - // Sort potentialVictims by pod priority from high to low, which ensures to - // reprieve higher priority pods first. - sort.Slice(potentialVictims, func(i, j int) bool { return util.MoreImportantPod(potentialVictims[i].Pod, potentialVictims[j].Pod) }) - // Try to reprieve as many pods as possible. We first try to reprieve the PDB - // violating victims and then other non-violating ones. In both cases, we start - // from the highest priority victims. 
- violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims, pdbs) - reprievePod := func(pi *framework.PodInfo) (bool, error) { - if err := addPod(pi); err != nil { - return false, err - } - status := pl.fh.RunFilterPluginsWithNominatedPods(ctx, state, pod, nodeInfo) - fits := status.IsSuccess() - if !fits { - if err := removePod(pi); err != nil { - return false, err - } - rpi := pi.Pod - victims = append(victims, rpi) - logger.V(5).Info("Pod is a potential preemption victim on node", "pod", klog.KObj(rpi), "node", klog.KObj(nodeInfo.Node())) - } - return fits, nil - } - for _, p := range violatingVictims { - if fits, err := reprievePod(p); err != nil { - return nil, 0, framework.AsStatus(err) - } else if !fits { - numViolatingVictim++ - } - } - // Now we try to reprieve non-violating victims. - for _, p := range nonViolatingVictims { - if _, err := reprievePod(p); err != nil { - return nil, 0, framework.AsStatus(err) - } - } - - // Sort victims after reprieving pods to keep the pods in the victims sorted in order of priority from high to low. - if len(violatingVictims) != 0 && len(nonViolatingVictims) != 0 { - sort.Slice(victims, func(i, j int) bool { return util.MoreImportantPod(victims[i], victims[j]) }) - } - return victims, numViolatingVictim, framework.NewStatus(framework.Success) -} - -// PodEligibleToPreemptOthers returns one bool and one string. The bool -// indicates whether this pod should be considered for preempting other pods or -// not. The string includes the reason if this pod isn't eligible. -// There're several reasons: -// 1. The pod has a preemptionPolicy of Never. -// 2. The pod has already preempted other pods and the victims are in their graceful termination period. -// Currently we check the node that is nominated for this pod, and as long as there are -// terminating pods on this node, we don't attempt to preempt more pods. -func (pl *DefaultPreemption) PodEligibleToPreemptOthers(_ context.Context, pod *v1.Pod, nominatedNodeStatus *framework.Status) (bool, string) { - if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever { - return false, "not eligible due to preemptionPolicy=Never." - } - - nodeInfos := pl.fh.SnapshotSharedLister().NodeInfos() - nomNodeName := pod.Status.NominatedNodeName - if len(nomNodeName) > 0 { - // If the pod's nominated node is considered as UnschedulableAndUnresolvable by the filters, - // then the pod should be considered for preempting again. - if nominatedNodeStatus.Code() == framework.UnschedulableAndUnresolvable { - return true, "" - } - - if nodeInfo, _ := nodeInfos.Get(nomNodeName); nodeInfo != nil { - podPriority := corev1helpers.PodPriority(pod) - for _, p := range nodeInfo.Pods { - if corev1helpers.PodPriority(p.Pod) < podPriority && podTerminatingByPreemption(p.Pod) { - // There is a terminating pod on the nominated node. - return false, "not eligible due to a terminating pod on the nominated node." - } - } - } - } - return true, "" -} - -// OrderedScoreFuncs returns a list of ordered score functions to select preferable node where victims will be preempted. -func (pl *DefaultPreemption) OrderedScoreFuncs(ctx context.Context, nodesToVictims map[string]*extenderv1.Victims) []func(node string) int64 { - return nil -} - -// podTerminatingByPreemption returns true if the pod is in the termination state caused by scheduler preemption. 
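To make the reprieve ordering in SelectVictimsOnNode above concrete, consider an invented scenario worked through filterPodsWithPDBViolation (defined below): a node has three lower-priority victims v1 > v2 > v3 by priority, and one PDB matches v2 and v3 with Status.DisruptionsAllowed=1. Walking the priority-sorted list, v2 consumes the remaining budget (the counter reaches 0, so v2 is non-violating), while v3 drives the counter negative and lands in the violating group. Because the violating group is reprieved first, v3 is tried for re-admission ahead of v2, so if the incoming pod still fits with v3 restored, the final victim set avoids the PDB violation entirely.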
-func podTerminatingByPreemption(p *v1.Pod) bool {
-	if p.DeletionTimestamp == nil {
-		return false
-	}
-
-	for _, condition := range p.Status.Conditions {
-		if condition.Type == v1.DisruptionTarget {
-			return condition.Status == v1.ConditionTrue && condition.Reason == v1.PodReasonPreemptionByScheduler
-		}
-	}
-	return false
-}
-
-// filterPodsWithPDBViolation groups the given "pods" into two groups of "violatingPods"
-// and "nonViolatingPods" based on whether their PDBs will be violated if they are
-// preempted.
-// This function is stable and does not change the order of received pods. So, if it
-// receives a sorted list, grouping will preserve the order of the input list.
-func filterPodsWithPDBViolation(podInfos []*framework.PodInfo, pdbs []*policy.PodDisruptionBudget) (violatingPodInfos, nonViolatingPodInfos []*framework.PodInfo) {
-	pdbsAllowed := make([]int32, len(pdbs))
-	for i, pdb := range pdbs {
-		pdbsAllowed[i] = pdb.Status.DisruptionsAllowed
-	}
-
-	for _, podInfo := range podInfos {
-		pod := podInfo.Pod
-		pdbForPodIsViolated := false
-		// A pod with no labels will not match any PDB. So, no need to check.
-		if len(pod.Labels) != 0 {
-			for i, pdb := range pdbs {
-				if pdb.Namespace != pod.Namespace {
-					continue
-				}
-				selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
-				if err != nil {
-					// This object has an invalid selector; it does not match the pod.
-					continue
-				}
-				// A PDB with a nil or empty selector matches nothing.
-				if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
-					continue
-				}
-
-				// Existing in DisruptedPods means it has been processed in the API server,
-				// so we don't treat it as a violating case.
-				if _, exist := pdb.Status.DisruptedPods[pod.Name]; exist {
-					continue
-				}
-				// Only decrement the matched pdb when it's not in its DisruptedPods;
-				// otherwise we may over-decrement the budget number.
-				pdbsAllowed[i]--
-				// We have found a matching PDB.
-				if pdbsAllowed[i] < 0 {
-					pdbForPodIsViolated = true
-				}
-			}
-		}
-		if pdbForPodIsViolated {
-			violatingPodInfos = append(violatingPodInfos, podInfo)
-		} else {
-			nonViolatingPodInfos = append(nonViolatingPodInfos, podInfo)
-		}
-	}
-	return violatingPodInfos, nonViolatingPodInfos
-}
-
-func getPDBLister(informerFactory informers.SharedInformerFactory) policylisters.PodDisruptionBudgetLister {
-	return informerFactory.Policy().V1().PodDisruptionBudgets().Lister()
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/OWNERS
deleted file mode 100644
index 9dcfffa88e1..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/OWNERS
+++ /dev/null
@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-  - klueska
-  - pohly
-  - bart0sh
-labels:
-  - sig/node
-  - wg/device-management
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/allocateddevices.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/allocateddevices.go
deleted file mode 100644
index 9c8d2305b20..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/allocateddevices.go
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
-Copyright 2024 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dynamicresources - -import ( - "sync" - - resourceapi "k8s.io/api/resource/v1beta1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/tools/cache" - "k8s.io/dynamic-resource-allocation/structured" - "k8s.io/klog/v2" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" - "k8s.io/utils/ptr" -) - -// foreachAllocatedDevice invokes the provided callback for each -// device in the claim's allocation result which was allocated -// exclusively for the claim. -// -// Devices allocated with admin access can be shared with other -// claims and are skipped without invoking the callback. -// -// foreachAllocatedDevice does nothing if the claim is not allocated. -func foreachAllocatedDevice(claim *resourceapi.ResourceClaim, cb func(deviceID structured.DeviceID)) { - if claim.Status.Allocation == nil { - return - } - for _, result := range claim.Status.Allocation.Devices.Results { - // Kubernetes 1.31 did not set this, 1.32 always does. - // Supporting 1.31 is not worth the additional code that - // would have to be written (= looking up in request) because - // it is extremely unlikely that there really is a result - // that still exists in a cluster from 1.31 where this matters. - if ptr.Deref(result.AdminAccess, false) { - // Is not considered as allocated. - continue - } - deviceID := structured.MakeDeviceID(result.Driver, result.Pool, result.Device) - - // None of the users of this helper need to abort iterating, - // therefore it's not supported as it only would add overhead. - cb(deviceID) - } -} - -// allocatedDevices reacts to events in a cache and maintains a set of all allocated devices. -// This is cheaper than repeatedly calling List, making strings unique, and building the set -// each time PreFilter is called. -// -// All methods are thread-safe. Get returns a cloned set. -type allocatedDevices struct { - logger klog.Logger - - mutex sync.RWMutex - ids sets.Set[structured.DeviceID] -} - -func newAllocatedDevices(logger klog.Logger) *allocatedDevices { - return &allocatedDevices{ - logger: logger, - ids: sets.New[structured.DeviceID](), - } -} - -func (a *allocatedDevices) Get() sets.Set[structured.DeviceID] { - a.mutex.RLock() - defer a.mutex.RUnlock() - - return a.ids.Clone() -} - -func (a *allocatedDevices) handlers() cache.ResourceEventHandler { - return cache.ResourceEventHandlerFuncs{ - AddFunc: a.onAdd, - UpdateFunc: a.onUpdate, - DeleteFunc: a.onDelete, - } -} - -func (a *allocatedDevices) onAdd(obj any) { - claim, _, err := schedutil.As[*resourceapi.ResourceClaim](obj, nil) - if err != nil { - // Shouldn't happen. - a.logger.Error(err, "unexpected object in allocatedDevices.onAdd") - return - } - - if claim.Status.Allocation != nil { - a.addDevices(claim) - } -} - -func (a *allocatedDevices) onUpdate(oldObj, newObj any) { - originalClaim, modifiedClaim, err := schedutil.As[*resourceapi.ResourceClaim](oldObj, newObj) - if err != nil { - // Shouldn't happen. 
- a.logger.Error(err, "unexpected object in allocatedDevices.onUpdate") - return - } - - switch { - case originalClaim.Status.Allocation == nil && modifiedClaim.Status.Allocation != nil: - a.addDevices(modifiedClaim) - case originalClaim.Status.Allocation != nil && modifiedClaim.Status.Allocation == nil: - a.removeDevices(originalClaim) - default: - // Nothing to do. Either both nil or both non-nil, in which case the content - // also must be the same (immutable!). - } -} - -func (a *allocatedDevices) onDelete(obj any) { - claim, _, err := schedutil.As[*resourceapi.ResourceClaim](obj, nil) - if err != nil { - // Shouldn't happen. - a.logger.Error(err, "unexpected object in allocatedDevices.onDelete") - return - } - - a.removeDevices(claim) -} - -func (a *allocatedDevices) addDevices(claim *resourceapi.ResourceClaim) { - if claim.Status.Allocation == nil { - return - } - // Locking of the mutex gets minimized by pre-computing what needs to be done - // without holding the lock. - deviceIDs := make([]structured.DeviceID, 0, 20) - foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) { - a.logger.V(6).Info("Observed device allocation", "device", deviceID, "claim", klog.KObj(claim)) - deviceIDs = append(deviceIDs, deviceID) - }) - - a.mutex.Lock() - defer a.mutex.Unlock() - for _, deviceID := range deviceIDs { - a.ids.Insert(deviceID) - } -} - -func (a *allocatedDevices) removeDevices(claim *resourceapi.ResourceClaim) { - if claim.Status.Allocation == nil { - return - } - - // Locking of the mutex gets minimized by pre-computing what needs to be done - // without holding the lock. - deviceIDs := make([]structured.DeviceID, 0, 20) - foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) { - a.logger.V(6).Info("Observed device deallocation", "device", deviceID, "claim", klog.KObj(claim)) - deviceIDs = append(deviceIDs, deviceID) - }) - - a.mutex.Lock() - defer a.mutex.Unlock() - for _, deviceID := range deviceIDs { - a.ids.Delete(deviceID) - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dra_manager.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dra_manager.go deleted file mode 100644 index 47d3d5de4ce..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dra_manager.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
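A note on the allocatedDevices type deleted above (a standalone sketch, not part of the patch): the pattern of maintaining a mutex-guarded set from informer events, and handing out clones on read, can be shown without any Kubernetes imports. The type name and ID strings below are invented for illustration:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // trackedSet mirrors the shape of allocatedDevices: incremental
    // updates from event handlers instead of re-listing, and Get
    // returning a copy so callers never race with writers.
    type trackedSet struct {
    	mu  sync.RWMutex
    	ids map[string]struct{}
    }

    func newTrackedSet() *trackedSet {
    	return &trackedSet{ids: make(map[string]struct{})}
    }

    func (s *trackedSet) onAdd(id string) {
    	s.mu.Lock()
    	defer s.mu.Unlock()
    	s.ids[id] = struct{}{}
    }

    func (s *trackedSet) onDelete(id string) {
    	s.mu.Lock()
    	defer s.mu.Unlock()
    	delete(s.ids, id)
    }

    // Get returns a clone, like allocatedDevices.Get.
    func (s *trackedSet) Get() map[string]struct{} {
    	s.mu.RLock()
    	defer s.mu.RUnlock()
    	clone := make(map[string]struct{}, len(s.ids))
    	for id := range s.ids {
    		clone[id] = struct{}{}
    	}
    	return clone
    }

    func main() {
    	s := newTrackedSet()
    	s.onAdd("driver/pool/device-0")
    	s.onAdd("driver/pool/device-1")
    	s.onDelete("driver/pool/device-0")
    	fmt.Println(len(s.Get())) // 1
    }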
-*/
-
-package dynamicresources
-
-import (
-	"context"
-	"fmt"
-	"sync"
-
-	resourceapi "k8s.io/api/resource/v1beta1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/client-go/informers"
-	resourcelisters "k8s.io/client-go/listers/resource/v1beta1"
-	resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
-	"k8s.io/dynamic-resource-allocation/structured"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
-)
-
-var _ framework.SharedDRAManager = &DefaultDRAManager{}
-
-// DefaultDRAManager is the default implementation of SharedDRAManager. It obtains the DRA objects
-// from API informers, and uses an AssumeCache and a map of in-flight allocations in order
-// to avoid race conditions when modifying ResourceClaims.
-type DefaultDRAManager struct {
-	resourceClaimTracker *claimTracker
-	resourceSliceLister  *resourceSliceLister
-	deviceClassLister    *deviceClassLister
-}
-
-func NewDRAManager(ctx context.Context, claimsCache *assumecache.AssumeCache, resourceSliceTracker *resourceslicetracker.Tracker, informerFactory informers.SharedInformerFactory) *DefaultDRAManager {
-	logger := klog.FromContext(ctx)
-
-	manager := &DefaultDRAManager{
-		resourceClaimTracker: &claimTracker{
-			cache:               claimsCache,
-			inFlightAllocations: &sync.Map{},
-			allocatedDevices:    newAllocatedDevices(logger),
-			logger:              logger,
-		},
-		resourceSliceLister: &resourceSliceLister{tracker: resourceSliceTracker},
-		deviceClassLister:   &deviceClassLister{classLister: informerFactory.Resource().V1beta1().DeviceClasses().Lister()},
-	}
-
-	// Reacting to events is more efficient than iterating over the list
-	// repeatedly in PreFilter.
-	manager.resourceClaimTracker.cache.AddEventHandler(manager.resourceClaimTracker.allocatedDevices.handlers())
-
-	return manager
-}
-
-func (s *DefaultDRAManager) ResourceClaims() framework.ResourceClaimTracker {
-	return s.resourceClaimTracker
-}
-
-func (s *DefaultDRAManager) ResourceSlices() framework.ResourceSliceLister {
-	return s.resourceSliceLister
-}
-
-func (s *DefaultDRAManager) DeviceClasses() framework.DeviceClassLister {
-	return s.deviceClassLister
-}
-
-var _ framework.ResourceSliceLister = &resourceSliceLister{}
-
-type resourceSliceLister struct {
-	tracker *resourceslicetracker.Tracker
-}
-
-func (l *resourceSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) {
-	return l.tracker.ListPatchedResourceSlices()
-}
-
-var _ framework.DeviceClassLister = &deviceClassLister{}
-
-type deviceClassLister struct {
-	classLister resourcelisters.DeviceClassLister
-}
-
-func (l *deviceClassLister) Get(className string) (*resourceapi.DeviceClass, error) {
-	return l.classLister.Get(className)
-}
-
-func (l *deviceClassLister) List() ([]*resourceapi.DeviceClass, error) {
-	return l.classLister.List(labels.Everything())
-}
-
-var _ framework.ResourceClaimTracker = &claimTracker{}
-
-type claimTracker struct {
-	// cache enables temporarily storing a newer claim object
-	// while the scheduler has allocated it and the corresponding object
-	// update from the apiserver has not been processed by the claim
-	// informer callbacks. ResourceClaimTracker get added here in PreBind and removed by
-	// the informer callback (based on the "newer than" comparison in the
-	// assume cache).
-	//
-	// It uses cache.MetaNamespaceKeyFunc to generate object names, which
-	// therefore are "<namespace>/<name>".
- // - // This is necessary to ensure that reconstructing the resource usage - // at the start of a pod scheduling cycle doesn't reuse the resources - // assigned to such a claim. Alternatively, claim allocation state - // could also get tracked across pod scheduling cycles, but that - // - adds complexity (need to carefully sync state with informer events - // for claims and ResourceSlices) - // - would make integration with cluster autoscaler harder because it would need - // to trigger informer callbacks. - cache *assumecache.AssumeCache - // inFlightAllocations is a map from claim UUIDs to claim objects for those claims - // for which allocation was triggered during a scheduling cycle and the - // corresponding claim status update call in PreBind has not been done - // yet. If another pod needs the claim, the pod is treated as "not - // schedulable yet". The cluster event for the claim status update will - // make it schedulable. - // - // This mechanism avoids the following problem: - // - Pod A triggers allocation for claim X. - // - Pod B shares access to that claim and gets scheduled because - // the claim is assumed to be allocated. - // - PreBind for pod B is called first, tries to update reservedFor and - // fails because the claim is not really allocated yet. - // - // We could avoid the ordering problem by allowing either pod A or pod B - // to set the allocation. But that is more complicated and leads to another - // problem: - // - Pod A and B get scheduled as above. - // - PreBind for pod A gets called first, then fails with a temporary API error. - // It removes the updated claim from the assume cache because of that. - // - PreBind for pod B gets called next and succeeds with adding the - // allocation and its own reservedFor entry. - // - The assume cache is now not reflecting that the claim is allocated, - // which could lead to reusing the same resource for some other claim. - // - // A sync.Map is used because in practice sharing of a claim between - // pods is expected to be rare compared to per-pod claim, so we end up - // hitting the "multiple goroutines read, write, and overwrite entries - // for disjoint sets of keys" case that sync.Map is optimized for. - inFlightAllocations *sync.Map - allocatedDevices *allocatedDevices - logger klog.Logger -} - -func (c *claimTracker) ClaimHasPendingAllocation(claimUID types.UID) bool { - _, found := c.inFlightAllocations.Load(claimUID) - return found -} - -func (c *claimTracker) SignalClaimPendingAllocation(claimUID types.UID, allocatedClaim *resourceapi.ResourceClaim) error { - c.inFlightAllocations.Store(claimUID, allocatedClaim) - // There's no reason to return an error in this implementation, but the error is helpful for other implementations. - // For example, implementations that have to deal with fake claims might want to return an error if the allocation - // is for an invalid claim. 
- return nil -} - -func (c *claimTracker) RemoveClaimPendingAllocation(claimUID types.UID) (deleted bool) { - _, found := c.inFlightAllocations.LoadAndDelete(claimUID) - return found -} - -func (c *claimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) { - obj, err := c.cache.Get(namespace + "/" + claimName) - if err != nil { - return nil, err - } - claim, ok := obj.(*resourceapi.ResourceClaim) - if !ok { - return nil, fmt.Errorf("unexpected object type %T for assumed object %s/%s", obj, namespace, claimName) - } - return claim, nil -} - -func (c *claimTracker) List() ([]*resourceapi.ResourceClaim, error) { - var result []*resourceapi.ResourceClaim - // Probably not worth adding an index for? - objs := c.cache.List(nil) - for _, obj := range objs { - claim, ok := obj.(*resourceapi.ResourceClaim) - if ok { - result = append(result, claim) - } - } - return result, nil -} - -func (c *claimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) { - // Start with a fresh set that matches the current known state of the - // world according to the informers. - allocated := c.allocatedDevices.Get() - - // Whatever is in flight also has to be checked. - c.inFlightAllocations.Range(func(key, value any) bool { - claim := value.(*resourceapi.ResourceClaim) - foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) { - c.logger.V(6).Info("Device is in flight for allocation", "device", deviceID, "claim", klog.KObj(claim)) - allocated.Insert(deviceID) - }) - return true - }) - // There's no reason to return an error in this implementation, but the error might be helpful for other implementations. - return allocated, nil -} - -func (c *claimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error { - return c.cache.Assume(claim) -} - -func (c *claimTracker) AssumedClaimRestore(namespace, claimName string) { - c.cache.Restore(namespace + "/" + claimName) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go deleted file mode 100644 index dd1adfc5aa7..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go +++ /dev/null @@ -1,910 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
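The in-flight allocation bookkeeping described in the claimTracker comments above also stands alone well. A minimal sketch (illustrative only; the UID strings and helper names are invented): claims allocated during a scheduling cycle but not yet written back sit in a sync.Map keyed by UID, and other pods needing the same claim wait until the entry is gone:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // inFlight parks claims whose allocation was decided in a scheduling
    // cycle but has not yet been persisted via the API, keyed by UID.
    var inFlight sync.Map

    func signalPending(uid string, claim any) {
    	inFlight.Store(uid, claim)
    }

    func hasPending(uid string) bool {
    	_, ok := inFlight.Load(uid)
    	return ok
    }

    func removePending(uid string) (deleted bool) {
    	_, ok := inFlight.LoadAndDelete(uid)
    	return ok
    }

    func main() {
    	signalPending("uid-123", "allocated claim object")
    	fmt.Println(hasPending("uid-123"))    // true: another pod would wait
    	fmt.Println(removePending("uid-123")) // true: status update landed
    	fmt.Println(hasPending("uid-123"))    // false
    }

sync.Map fits here for the reason the original comment gives: mostly disjoint keys written and read by many goroutines, with sharing of a claim between pods being the rare case.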
-*/ - -package dynamicresources - -import ( - "context" - "errors" - "fmt" - "slices" - "strings" - "sync" - - "github.com/google/go-cmp/cmp" //nolint:depguard - - v1 "k8s.io/api/core/v1" - resourceapi "k8s.io/api/resource/v1beta1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" - "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" - "k8s.io/dynamic-resource-allocation/cel" - "k8s.io/dynamic-resource-allocation/resourceclaim" - "k8s.io/dynamic-resource-allocation/structured" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" -) - -const ( - // Name is the name of the plugin used in Registry and configurations. - Name = names.DynamicResources - - stateKey framework.StateKey = Name -) - -// The state is initialized in PreFilter phase. Because we save the pointer in -// framework.CycleState, in the later phases we don't need to call Write method -// to update the value -type stateData struct { - // A copy of all claims for the Pod (i.e. 1:1 match with - // pod.Spec.ResourceClaims), initially with the status from the start - // of the scheduling cycle. Each claim instance is read-only because it - // might come from the informer cache. The instances get replaced when - // the plugin itself successfully does an Update. - // - // Empty if the Pod has no claims. - claims []*resourceapi.ResourceClaim - - // Allocator handles claims with structured parameters. - allocator *structured.Allocator - - // mutex must be locked while accessing any of the fields below. - mutex sync.Mutex - - // The indices of all claims that: - // - are allocated - // - use delayed allocation or the builtin controller - // - were not available on at least one node - // - // Set in parallel during Filter, so write access there must be - // protected by the mutex. Used by PostFilter. - unavailableClaims sets.Set[int] - - informationsForClaim []informationForClaim - - // nodeAllocations caches the result of Filter for the nodes. - nodeAllocations map[string][]resourceapi.AllocationResult -} - -func (d *stateData) Clone() framework.StateData { - return d -} - -type informationForClaim struct { - // Node selector based on the claim status if allocated. - availableOnNodes *nodeaffinity.NodeSelector - - // Set by Reserved, published by PreBind. - allocation *resourceapi.AllocationResult -} - -// DynamicResources is a plugin that ensures that ResourceClaims are allocated. -type DynamicResources struct { - enabled bool - enableAdminAccess bool - enablePrioritizedList bool - enableSchedulingQueueHint bool - enablePartitionableDevices bool - enableDeviceTaints bool - - fh framework.Handle - clientset kubernetes.Interface - celCache *cel.Cache - draManager framework.SharedDRAManager -} - -// New initializes a new plugin and returns it. -func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { - if !fts.EnableDynamicResourceAllocation { - // Disabled, won't do anything. 
- return &DynamicResources{}, nil - } - - pl := &DynamicResources{ - enabled: true, - enableAdminAccess: fts.EnableDRAAdminAccess, - enableDeviceTaints: fts.EnableDRADeviceTaints, - enablePrioritizedList: fts.EnableDRAPrioritizedList, - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - enablePartitionableDevices: fts.EnablePartitionableDevices, - - fh: fh, - clientset: fh.ClientSet(), - // This is a LRU cache for compiled CEL expressions. The most - // recent 10 of them get reused across different scheduling - // cycles. - celCache: cel.NewCache(10), - draManager: fh.SharedDRAManager(), - } - - return pl, nil -} - -var _ framework.PreEnqueuePlugin = &DynamicResources{} -var _ framework.PreFilterPlugin = &DynamicResources{} -var _ framework.FilterPlugin = &DynamicResources{} -var _ framework.PostFilterPlugin = &DynamicResources{} -var _ framework.ReservePlugin = &DynamicResources{} -var _ framework.EnqueueExtensions = &DynamicResources{} -var _ framework.PreBindPlugin = &DynamicResources{} - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *DynamicResources) Name() string { - return Name -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *DynamicResources) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - if !pl.enabled { - return nil, nil - } - // A resource might depend on node labels for topology filtering. - // A new or updated node may make pods schedulable. - // - // A note about UpdateNodeTaint event: - // Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin. - // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add. - // See: https://github.com/kubernetes/kubernetes/issues/109437 - nodeActionType := framework.Add | framework.UpdateNodeLabel | framework.UpdateNodeTaint - if pl.enableSchedulingQueueHint { - // When QHint is enabled, the problematic preCheck is already removed, and we can remove UpdateNodeTaint. - nodeActionType = framework.Add | framework.UpdateNodeLabel - } - - events := []framework.ClusterEventWithHint{ - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}}, - // Allocation is tracked in ResourceClaims, so any changes may make the pods schedulable. - {Event: framework.ClusterEvent{Resource: framework.ResourceClaim, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterClaimChange}, - // Adding the ResourceClaim name to the pod status makes pods waiting for their ResourceClaim schedulable. - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.UpdatePodGeneratedResourceClaim}, QueueingHintFn: pl.isSchedulableAfterPodChange}, - // A pod might be waiting for a class to get created or modified. - {Event: framework.ClusterEvent{Resource: framework.DeviceClass, ActionType: framework.Add | framework.Update}}, - // Adding or updating a ResourceSlice might make a pod schedulable because new resources became available. - {Event: framework.ClusterEvent{Resource: framework.ResourceSlice, ActionType: framework.Add | framework.Update}}, - } - - return events, nil -} - -// PreEnqueue checks if there are known reasons why a pod currently cannot be -// scheduled. When this fails, one of the registered events can trigger another -// attempt. 
-func (pl *DynamicResources) PreEnqueue(ctx context.Context, pod *v1.Pod) (status *framework.Status) { - if !pl.enabled { - return nil - } - - if err := pl.foreachPodResourceClaim(pod, nil); err != nil { - return statusUnschedulable(klog.FromContext(ctx), err.Error()) - } - return nil -} - -// isSchedulableAfterClaimChange is invoked for add and update claim events reported by -// an informer. It checks whether that change made a previously unschedulable -// pod schedulable. It errs on the side of letting a pod scheduling attempt -// happen. The delete claim event will not invoke it, so newObj will never be nil. -func (pl *DynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalClaim, modifiedClaim, err := schedutil.As[*resourceapi.ResourceClaim](oldObj, newObj) - if err != nil { - // Shouldn't happen. - return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterClaimChange: %w", err) - } - - usesClaim := false - if err := pl.foreachPodResourceClaim(pod, func(_ string, claim *resourceapi.ResourceClaim) { - if claim.UID == modifiedClaim.UID { - usesClaim = true - } - }); err != nil { - // This is not an unexpected error: we know that - // foreachPodResourceClaim only returns errors for "not - // schedulable". - if loggerV := logger.V(6); loggerV.Enabled() { - owner := metav1.GetControllerOf(modifiedClaim) - loggerV.Info("pod is not schedulable after resource claim change", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim), "claimOwner", owner, "reason", err.Error()) - } - return framework.QueueSkip, nil - } - - if originalClaim != nil && - originalClaim.Status.Allocation != nil && - modifiedClaim.Status.Allocation == nil { - // A claim with structured parameters was deallocated. This might have made - // resources available for other pods. - logger.V(6).Info("claim with structured parameters got deallocated", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - return framework.Queue, nil - } - - if !usesClaim { - // This was not the claim the pod was waiting for. - logger.V(6).Info("unrelated claim got modified", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - return framework.QueueSkip, nil - } - - if originalClaim == nil { - logger.V(5).Info("claim for pod got created", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - return framework.Queue, nil - } - - // Modifications may or may not be relevant. If the entire - // status is as before, then something else must have changed - // and we don't care. What happens in practice is that the - // resource driver adds the finalizer. - if apiequality.Semantic.DeepEqual(&originalClaim.Status, &modifiedClaim.Status) { - if loggerV := logger.V(7); loggerV.Enabled() { - // Log more information. - loggerV.Info("claim for pod got modified where the pod doesn't care", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim), "diff", cmp.Diff(originalClaim, modifiedClaim)) - } else { - logger.V(6).Info("claim for pod got modified where the pod doesn't care", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - } - return framework.QueueSkip, nil - } - - logger.V(5).Info("status of claim for pod got updated", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - return framework.Queue, nil -} - -// isSchedulableAfterPodChange is invoked for update pod events reported by -// an informer. It checks whether that change adds the ResourceClaim(s) that the -// pod has been waiting for. 
-func (pl *DynamicResources) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - _, modifiedPod, err := schedutil.As[*v1.Pod](nil, newObj) - if err != nil { - // Shouldn't happen. - return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterClaimChange: %w", err) - } - - if pod.UID != modifiedPod.UID { - logger.V(7).Info("pod is not schedulable after change in other pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - - if err := pl.foreachPodResourceClaim(modifiedPod, nil); err != nil { - // This is not an unexpected error: we know that - // foreachPodResourceClaim only returns errors for "not - // schedulable". - logger.V(6).Info("pod is not schedulable after being updated", "pod", klog.KObj(pod)) - return framework.QueueSkip, nil - } - - logger.V(5).Info("pod got updated and is schedulable", "pod", klog.KObj(pod)) - return framework.Queue, nil -} - -// podResourceClaims returns the ResourceClaims for all pod.Spec.PodResourceClaims. -func (pl *DynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourceapi.ResourceClaim, error) { - claims := make([]*resourceapi.ResourceClaim, 0, len(pod.Spec.ResourceClaims)) - if err := pl.foreachPodResourceClaim(pod, func(_ string, claim *resourceapi.ResourceClaim) { - // We store the pointer as returned by the lister. The - // assumption is that if a claim gets modified while our code - // runs, the cache will store a new pointer, not mutate the - // existing object that we point to here. - claims = append(claims, claim) - }); err != nil { - return nil, err - } - return claims, nil -} - -// foreachPodResourceClaim checks that each ResourceClaim for the pod exists. -// It calls an optional handler for those claims that it finds. -func (pl *DynamicResources) foreachPodResourceClaim(pod *v1.Pod, cb func(podResourceName string, claim *resourceapi.ResourceClaim)) error { - for _, resource := range pod.Spec.ResourceClaims { - claimName, mustCheckOwner, err := resourceclaim.Name(pod, &resource) - if err != nil { - return err - } - // The claim name might be nil if no underlying resource claim - // was generated for the referenced claim. There are valid use - // cases when this might happen, so we simply skip it. - if claimName == nil { - continue - } - claim, err := pl.draManager.ResourceClaims().Get(pod.Namespace, *claimName) - if err != nil { - return err - } - - if claim.DeletionTimestamp != nil { - return fmt.Errorf("resourceclaim %q is being deleted", claim.Name) - } - - if mustCheckOwner { - if err := resourceclaim.IsForPod(pod, claim); err != nil { - return err - } - } - if cb != nil { - cb(resource.Name, claim) - } - } - return nil -} - -// PreFilter invoked at the prefilter extension point to check if pod has all -// immediate claims bound. UnschedulableAndUnresolvable is returned if -// the pod cannot be scheduled at the moment on any node. -func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - if !pl.enabled { - return nil, framework.NewStatus(framework.Skip) - } - logger := klog.FromContext(ctx) - - // If the pod does not reference any claim, we don't need to do - // anything for it. We just initialize an empty state to record that - // observation for the other functions. This gets updated below - // if we get that far. 
- s := &stateData{} - state.Write(stateKey, s) - - claims, err := pl.podResourceClaims(pod) - if err != nil { - return nil, statusUnschedulable(logger, err.Error()) - } - logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(claims)) - - // If the pod does not reference any claim, - // DynamicResources Filter has nothing to do with the Pod. - if len(claims) == 0 { - return nil, framework.NewStatus(framework.Skip) - } - - // All claims which the scheduler needs to allocate itself. - allocateClaims := make([]*resourceapi.ResourceClaim, 0, len(claims)) - - s.informationsForClaim = make([]informationForClaim, len(claims)) - for index, claim := range claims { - if claim.Status.Allocation != nil && - !resourceclaim.CanBeReserved(claim) && - !resourceclaim.IsReservedForPod(pod, claim) { - // Resource is in use. The pod has to wait. - return nil, statusUnschedulable(logger, "resourceclaim in use", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim)) - } - - if claim.Status.Allocation != nil { - if claim.Status.Allocation.NodeSelector != nil { - nodeSelector, err := nodeaffinity.NewNodeSelector(claim.Status.Allocation.NodeSelector) - if err != nil { - return nil, statusError(logger, err) - } - s.informationsForClaim[index].availableOnNodes = nodeSelector - } - } else { - allocateClaims = append(allocateClaims, claim) - - // Allocation in flight? Better wait for that - // to finish, see inFlightAllocations - // documentation for details. - if pl.draManager.ResourceClaims().ClaimHasPendingAllocation(claim.UID) { - return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s is in the process of being allocated", klog.KObj(claim))) - } - - // Check all requests and device classes. If a class - // does not exist, scheduling cannot proceed, no matter - // how the claim is being allocated. - // - // When using a control plane controller, a class might - // have a node filter. This is useful for trimming the - // initial set of potential nodes before we ask the - // driver(s) for information about the specific pod. - for _, request := range claim.Spec.Devices.Requests { - // The requirements differ depending on whether the request has a list of - // alternative subrequests defined in the firstAvailable field. - if len(request.FirstAvailable) == 0 { - if status := pl.validateDeviceClass(logger, request.DeviceClassName, request.Name); status != nil { - return nil, status - } - } else { - if !pl.enablePrioritizedList { - return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s, request %s: has subrequests, but the DRAPrioritizedList feature is disabled", klog.KObj(claim), request.Name)) - } - for _, subRequest := range request.FirstAvailable { - qualRequestName := strings.Join([]string{request.Name, subRequest.Name}, "/") - if status := pl.validateDeviceClass(logger, subRequest.DeviceClassName, qualRequestName); status != nil { - return nil, status - } - } - } - } - } - } - - if len(allocateClaims) > 0 { - logger.V(5).Info("Preparing allocation with structured parameters", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(allocateClaims)) - - // Doing this over and over again for each pod could be avoided - // by setting the allocator up once and then keeping it up-to-date - // as changes are observed. - // - // But that would cause problems for using the plugin in the - // Cluster Autoscaler. If this step here turns out to be - // expensive, we may have to maintain and update state more - // persistently. 
- // - // Claims (and thus their devices) are treated as "allocated" if they are in the assume cache - // or currently their allocation is in-flight. This does not change - // during filtering, so we can determine that once. - allAllocatedDevices, err := pl.draManager.ResourceClaims().ListAllAllocatedDevices() - if err != nil { - return nil, statusError(logger, err) - } - slices, err := pl.draManager.ResourceSlices().ListWithDeviceTaintRules() - if err != nil { - return nil, statusError(logger, err) - } - features := structured.Features{ - AdminAccess: pl.enableAdminAccess, - PrioritizedList: pl.enablePrioritizedList, - PartitionableDevices: pl.enablePartitionableDevices, - DeviceTaints: pl.enableDeviceTaints, - } - allocator, err := structured.NewAllocator(ctx, features, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache) - if err != nil { - return nil, statusError(logger, err) - } - s.allocator = allocator - s.nodeAllocations = make(map[string][]resourceapi.AllocationResult) - } - - s.claims = claims - return nil, nil -} - -func (pl *DynamicResources) validateDeviceClass(logger klog.Logger, deviceClassName, requestName string) *framework.Status { - if deviceClassName == "" { - return statusError(logger, fmt.Errorf("request %s: unsupported request type", requestName)) - } - - _, err := pl.draManager.DeviceClasses().Get(deviceClassName) - if err != nil { - // If the class cannot be retrieved, allocation cannot proceed. - if apierrors.IsNotFound(err) { - // Here we mark the pod as "unschedulable", so it'll sleep in - // the unscheduleable queue until a DeviceClass event occurs. - return statusUnschedulable(logger, fmt.Sprintf("request %s: device class %s does not exist", requestName, deviceClassName)) - } - } - return nil -} - -// PreFilterExtensions returns prefilter extensions, pod add and remove. -func (pl *DynamicResources) PreFilterExtensions() framework.PreFilterExtensions { - return nil -} - -func getStateData(cs *framework.CycleState) (*stateData, error) { - state, err := cs.Read(stateKey) - if err != nil { - return nil, err - } - s, ok := state.(*stateData) - if !ok { - return nil, errors.New("unable to convert state into stateData") - } - return s, nil -} - -// Filter invoked at the filter extension point. -// It evaluates if a pod can fit due to the resources it requests, -// for both allocated and unallocated claims. -// -// For claims that are bound, then it checks that the node affinity is -// satisfied by the given node. -// -// For claims that are unbound, it checks whether the claim might get allocated -// for the node. -func (pl *DynamicResources) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - if !pl.enabled { - return nil - } - state, err := getStateData(cs) - if err != nil { - return statusError(klog.FromContext(ctx), err) - } - if len(state.claims) == 0 { - return nil - } - - logger := klog.FromContext(ctx) - node := nodeInfo.Node() - - var unavailableClaims []int - for index, claim := range state.claims { - logger.V(10).Info("filtering based on resource claims of the pod", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim)) - - // This node selector only gets set if the claim is allocated. 
- if nodeSelector := state.informationsForClaim[index].availableOnNodes; nodeSelector != nil && !nodeSelector.Match(node) { - logger.V(5).Info("allocation's node selector does not match", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim)) - unavailableClaims = append(unavailableClaims, index) - } - } - - // Use allocator to check the node and cache the result in case that the node is picked. - var allocations []resourceapi.AllocationResult - if state.allocator != nil { - allocCtx := ctx - if loggerV := logger.V(5); loggerV.Enabled() { - allocCtx = klog.NewContext(allocCtx, klog.LoggerWithValues(logger, "node", klog.KObj(node))) - } - - a, err := state.allocator.Allocate(allocCtx, node) - if err != nil { - // This should only fail if there is something wrong with the claim or class. - // Return an error to abort scheduling of it. - // - // This will cause retries. It would be slightly nicer to mark it as unschedulable - // *and* abort scheduling. Then only cluster event for updating the claim or class - // with the broken CEL expression would trigger rescheduling. - // - // But we cannot do both. As this shouldn't occur often, aborting like this is - // better than the more complicated alternative (return Unschedulable here, remember - // the error, then later raise it again later if needed). - return statusError(logger, err, "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaims", klog.KObjSlice(state.allocator.ClaimsToAllocate())) - } - // Check for exact length just to be sure. In practice this is all-or-nothing. - if len(a) != len(state.allocator.ClaimsToAllocate()) { - return statusUnschedulable(logger, "cannot allocate all claims", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaims", klog.KObjSlice(state.allocator.ClaimsToAllocate())) - } - // Reserve uses this information. - allocations = a - } - - // Store information in state while holding the mutex. - if state.allocator != nil || len(unavailableClaims) > 0 { - state.mutex.Lock() - defer state.mutex.Unlock() - } - - if len(unavailableClaims) > 0 { - // Remember all unavailable claims. This might be observed - // concurrently, so we have to lock the state before writing. - - if state.unavailableClaims == nil { - state.unavailableClaims = sets.New[int]() - } - - for _, index := range unavailableClaims { - state.unavailableClaims.Insert(index) - } - return statusUnschedulable(logger, "resourceclaim not available on the node", "pod", klog.KObj(pod)) - } - - if state.allocator != nil { - state.nodeAllocations[node.Name] = allocations - } - - return nil -} - -// PostFilter checks whether there are allocated claims that could get -// deallocated to help get the Pod schedulable. If yes, it picks one and -// requests its deallocation. This only gets called when filtering found no -// suitable node. -func (pl *DynamicResources) PostFilter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) { - if !pl.enabled { - return nil, framework.NewStatus(framework.Unschedulable, "plugin disabled") - } - // If a Pod doesn't have any resource claims attached to it, there is no need for further processing. - // Thus we provide a fast path for this case to avoid unnecessary computations. 
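One detail worth calling out in the Filter code above (the sketch below is illustrative, not part of the patch): Filter runs concurrently for many nodes against a single stateData, so unavailableClaims and nodeAllocations are only written under the mutex. A stripped-down model of that access pattern, with goroutines standing in for per-node Filter calls:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type state struct {
    	mu          sync.Mutex
    	unavailable map[int]struct{} // claim indices found unavailable on some node
    }

    func main() {
    	s := &state{unavailable: make(map[int]struct{})}
    	var wg sync.WaitGroup
    	// Pretend Filter ran for 8 nodes concurrently and each found
    	// claim index node%2 unavailable (values invented for the demo).
    	for node := 0; node < 8; node++ {
    		wg.Add(1)
    		go func(node int) {
    			defer wg.Done()
    			s.mu.Lock()
    			defer s.mu.Unlock()
    			s.unavailable[node%2] = struct{}{}
    		}(node)
    	}
    	wg.Wait()
    	fmt.Println("unavailable claim indices:", len(s.unavailable)) // 2
    }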
- if len(pod.Spec.ResourceClaims) == 0 { - return nil, framework.NewStatus(framework.Unschedulable) - } - logger := klog.FromContext(ctx) - state, err := getStateData(cs) - if err != nil { - return nil, statusError(logger, err) - } - if len(state.claims) == 0 { - return nil, framework.NewStatus(framework.Unschedulable, "no new claims to deallocate") - } - - // Iterating over a map is random. This is intentional here, we want to - // pick one claim randomly because there is no better heuristic. - for index := range state.unavailableClaims { - claim := state.claims[index] - if len(claim.Status.ReservedFor) == 0 || - len(claim.Status.ReservedFor) == 1 && claim.Status.ReservedFor[0].UID == pod.UID { - claim := claim.DeepCopy() - claim.Status.ReservedFor = nil - claim.Status.Allocation = nil - logger.V(5).Info("Deallocation of ResourceClaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim)) - if _, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil { - return nil, statusError(logger, err) - } - return nil, framework.NewStatus(framework.Unschedulable, "deallocation of ResourceClaim completed") - } - } - return nil, framework.NewStatus(framework.Unschedulable, "still not schedulable") -} - -// Reserve reserves claims for the pod. -func (pl *DynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) { - if !pl.enabled { - return nil - } - state, err := getStateData(cs) - if err != nil { - return statusError(klog.FromContext(ctx), err) - } - if len(state.claims) == 0 { - return nil - } - - logger := klog.FromContext(ctx) - - numClaimsWithAllocator := 0 - for _, claim := range state.claims { - if claim.Status.Allocation != nil { - // Allocated, but perhaps not reserved yet. We checked in PreFilter that - // the pod could reserve the claim. Instead of reserving here by - // updating the ResourceClaim status, we assume that reserving - // will work and only do it for real during binding. If it fails at - // that time, some other pod was faster and we have to try again. - continue - } - - numClaimsWithAllocator++ - } - - if numClaimsWithAllocator == 0 { - // Nothing left to do. - return nil - } - - // Prepare allocation of claims handled by the schedulder. - if state.allocator != nil { - // Entries in these two slices match each other. - claimsToAllocate := state.allocator.ClaimsToAllocate() - allocations, ok := state.nodeAllocations[nodeName] - if !ok { - // We checked before that the node is suitable. This shouldn't have failed, - // so treat this as an error. - return statusError(logger, errors.New("claim allocation not found for node")) - } - - // Sanity check: do we have results for all pending claims? - if len(allocations) != len(claimsToAllocate) || - len(allocations) != numClaimsWithAllocator { - return statusError(logger, fmt.Errorf("internal error, have %d allocations, %d claims to allocate, want %d claims", len(allocations), len(claimsToAllocate), numClaimsWithAllocator)) - } - - for i, claim := range claimsToAllocate { - index := slices.Index(state.claims, claim) - if index < 0 { - return statusError(logger, fmt.Errorf("internal error, claim %s with allocation not found", claim.Name)) - } - allocation := &allocations[i] - state.informationsForClaim[index].allocation = allocation - - // Strictly speaking, we don't need to store the full modified object. - // The allocation would be enough. 
The full object is useful for - // debugging, testing and the allocator, so let's make it realistic. - claim = claim.DeepCopy() - if !slices.Contains(claim.Finalizers, resourceapi.Finalizer) { - claim.Finalizers = append(claim.Finalizers, resourceapi.Finalizer) - } - claim.Status.Allocation = allocation - err := pl.draManager.ResourceClaims().SignalClaimPendingAllocation(claim.UID, claim) - if err != nil { - return statusError(logger, fmt.Errorf("internal error, couldn't signal allocation for claim %s", claim.Name)) - } - logger.V(5).Info("Reserved resource in allocation result", "claim", klog.KObj(claim), "allocation", klog.Format(allocation)) - } - } - - return nil -} - -// Unreserve clears the ReservedFor field for all claims. -// It's idempotent, and does nothing if no state found for the given pod. -func (pl *DynamicResources) Unreserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) { - if !pl.enabled { - return - } - state, err := getStateData(cs) - if err != nil { - return - } - if len(state.claims) == 0 { - return - } - - logger := klog.FromContext(ctx) - - for index, claim := range state.claims { - // If allocation was in-flight, then it's not anymore and we need to revert the - // claim object in the assume cache to what it was before. - if deleted := pl.draManager.ResourceClaims().RemoveClaimPendingAllocation(state.claims[index].UID); deleted { - pl.draManager.ResourceClaims().AssumedClaimRestore(claim.Namespace, claim.Name) - } - - if claim.Status.Allocation != nil && - resourceclaim.IsReservedForPod(pod, claim) { - // Remove pod from ReservedFor. A strategic-merge-patch is used - // because that allows removing an individual entry without having - // the latest slice. - patch := fmt.Sprintf(`{"metadata": {"uid": %q}, "status": { "reservedFor": [ {"$patch": "delete", "uid": %q} ] }}`, - claim.UID, - pod.UID, - ) - logger.V(5).Info("unreserve", "resourceclaim", klog.KObj(claim), "pod", klog.KObj(pod)) - claim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).Patch(ctx, claim.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "status") - if err != nil { - // We will get here again when pod scheduling is retried. - logger.Error(err, "unreserve", "resourceclaim", klog.KObj(claim)) - } - } - } -} - -// PreBind gets called in a separate goroutine after it has been determined -// that the pod should get bound to this node. Because Reserve did not actually -// reserve claims, we need to do it now. For claims with the builtin controller, -// we also handle the allocation. -// -// If anything fails, we return an error and -// the pod will have to go into the backoff queue. The scheduler will call -// Unreserve as part of the error handling. -func (pl *DynamicResources) PreBind(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status { - if !pl.enabled { - return nil - } - state, err := getStateData(cs) - if err != nil { - return statusError(klog.FromContext(ctx), err) - } - if len(state.claims) == 0 { - return nil - } - - logger := klog.FromContext(ctx) - - for index, claim := range state.claims { - if !resourceclaim.IsReservedForPod(pod, claim) { - claim, err := pl.bindClaim(ctx, state, index, pod, nodeName) - if err != nil { - return statusError(logger, err) - } - state.claims[index] = claim - } - } - // If we get here, we know that reserving the claim for - // the pod worked and we can proceed with binding it. 
- return nil -} - -// bindClaim gets called by PreBind for claim which is not reserved for the pod yet. -// It might not even be allocated. bindClaim then ensures that the allocation -// and reservation are recorded. This finishes the work started in Reserve. -func (pl *DynamicResources) bindClaim(ctx context.Context, state *stateData, index int, pod *v1.Pod, nodeName string) (patchedClaim *resourceapi.ResourceClaim, finalErr error) { - logger := klog.FromContext(ctx) - claim := state.claims[index].DeepCopy() - allocation := state.informationsForClaim[index].allocation - defer func() { - if allocation != nil { - // The scheduler was handling allocation. Now that has - // completed, either successfully or with a failure. - if finalErr == nil { - // This can fail, but only for reasons that are okay (concurrent delete or update). - // Shouldn't happen in this case. - if err := pl.draManager.ResourceClaims().AssumeClaimAfterAPICall(claim); err != nil { - logger.V(5).Info("Claim not stored in assume cache", "err", finalErr) - } - } - pl.draManager.ResourceClaims().RemoveClaimPendingAllocation(claim.UID) - } - }() - - logger.V(5).Info("preparing claim status update", "claim", klog.KObj(state.claims[index]), "allocation", klog.Format(allocation)) - - // We may run into a ResourceVersion conflict because there may be some - // benign concurrent changes. In that case we get the latest claim and - // try again. - refreshClaim := false - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if refreshClaim { - updatedClaim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("get updated claim %s after conflict: %w", klog.KObj(claim), err) - } - logger.V(5).Info("retrying update after conflict", "claim", klog.KObj(claim)) - claim = updatedClaim - } else { - // All future retries must get a new claim first. - refreshClaim = true - } - - if claim.DeletionTimestamp != nil { - return fmt.Errorf("claim %s got deleted in the meantime", klog.KObj(claim)) - } - - // Do we need to store an allocation result from Reserve? - if allocation != nil { - if claim.Status.Allocation != nil { - return fmt.Errorf("claim %s got allocated elsewhere in the meantime", klog.KObj(claim)) - } - - // The finalizer needs to be added in a normal update. - // If we were interrupted in the past, it might already be set and we simply continue. - if !slices.Contains(claim.Finalizers, resourceapi.Finalizer) { - claim.Finalizers = append(claim.Finalizers, resourceapi.Finalizer) - updatedClaim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("add finalizer to claim %s: %w", klog.KObj(claim), err) - } - claim = updatedClaim - } - claim.Status.Allocation = allocation - } - - // We can simply try to add the pod here without checking - // preconditions. The apiserver will tell us with a - // non-conflict error if this isn't possible. 
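The strategic-merge-patch built in Unreserve above is easier to judge when rendered. With placeholder UIDs, the same format string produces the JSON below; the $patch: delete directive removes a single reservedFor entry without requiring the latest full slice:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// UIDs are placeholders for the example.
    	claimUID, podUID := "claim-uid-1", "pod-uid-2"
    	patch := fmt.Sprintf(`{"metadata": {"uid": %q}, "status": { "reservedFor": [ {"$patch": "delete", "uid": %q} ] }}`,
    		claimUID, podUID)
    	fmt.Println(patch)                       // the rendered patch body
    	fmt.Println(json.Valid([]byte(patch)))   // true
    }

Including the claim's own UID in metadata makes the patch a no-op if the claim was deleted and recreated in the meantime, which is why it is part of the payload.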
- claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: pod.Name, UID: pod.UID}) - updatedClaim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - if allocation != nil { - return fmt.Errorf("add allocation and reservation to claim %s: %w", klog.KObj(claim), err) - } - return fmt.Errorf("add reservation to claim %s: %w", klog.KObj(claim), err) - } - claim = updatedClaim - return nil - }) - - if retryErr != nil { - return nil, retryErr - } - - logger.V(5).Info("reserved", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}, "resourceclaim", klog.Format(claim)) - return claim, nil -} - -// statusUnschedulable ensures that there is a log message associated with the -// line where the status originated. -func statusUnschedulable(logger klog.Logger, reason string, kv ...interface{}) *framework.Status { - if loggerV := logger.V(5); loggerV.Enabled() { - helper, loggerV := loggerV.WithCallStackHelper() - helper() - kv = append(kv, "reason", reason) - // nolint: logcheck // warns because it cannot check key/values - loggerV.Info("pod unschedulable", kv...) - } - return framework.NewStatus(framework.UnschedulableAndUnresolvable, reason) -} - -// statusError ensures that there is a log message associated with the -// line where the error originated. -func statusError(logger klog.Logger, err error, kv ...interface{}) *framework.Status { - if loggerV := logger.V(5); loggerV.Enabled() { - helper, loggerV := loggerV.WithCallStackHelper() - helper() - // nolint: logcheck // warns because it cannot check key/values - loggerV.Error(err, "dynamic resource plugin failed", kv...) - } - return framework.AsStatus(err) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature/feature.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature/feature.go deleted file mode 100644 index 7e7f1a1b8d8..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature/feature.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package feature - -// Features carries feature gate values used by various plugins. -// This struct allows us to break the dependency of the plugins on -// the internal k8s features pkg. 
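bindClaim's conflict-retry dance above can also be exercised in isolation. This sketch assumes k8s.io/client-go and k8s.io/apimachinery are on the module path and manufactures a conflict so the refresh-on-retry pattern is visible; nothing here talks to a real API server:

    package main

    import (
    	"fmt"

    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/client-go/util/retry"
    )

    func main() {
    	// Simulate bindClaim's refreshClaim dance: the first attempt uses
    	// the object already in hand, every later attempt re-fetches first.
    	attempts := 0
    	refresh := false
    	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
    		if refresh {
    			fmt.Println("re-fetching latest object before retrying")
    		} else {
    			refresh = true // all future retries must refresh first
    		}
    		attempts++
    		if attempts < 2 {
    			// Manufactured conflict; a real caller would get this
    			// from an UpdateStatus call losing a ResourceVersion race.
    			return apierrors.NewConflict(schema.GroupResource{Resource: "resourceclaims"}, "claim-0", fmt.Errorf("simulated"))
    		}
    		return nil
    	})
    	fmt.Println("attempts:", attempts, "err:", err) // attempts: 2 err: <nil>
    }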
-type Features struct { - EnableDRAPrioritizedList bool - EnableDRAAdminAccess bool - EnableDRADeviceTaints bool - EnableDynamicResourceAllocation bool - EnableVolumeAttributesClass bool - EnableCSIMigrationPortworx bool - EnableNodeInclusionPolicyInPodTopologySpread bool - EnableMatchLabelKeysInPodTopologySpread bool - EnableInPlacePodVerticalScaling bool - EnableSidecarContainers bool - EnableSchedulingQueueHint bool - EnableAsyncPreemption bool - EnablePodLevelResources bool - EnablePartitionableDevices bool - EnableStorageCapacityScoring bool -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/normalize_score.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/normalize_score.go deleted file mode 100644 index 3d35ca30493..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/normalize_score.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import ( - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// DefaultNormalizeScore generates a Normalize Score function that can normalize the -// scores from [0, max(scores)] to [0, maxPriority]. If reverse is set to true, it -// reverses the scores by subtracting it from maxPriority. -// Note: The input scores are always assumed to be non-negative integers. -func DefaultNormalizeScore(maxPriority int64, reverse bool, scores framework.NodeScoreList) *framework.Status { - var maxCount int64 - for i := range scores { - if scores[i].Score > maxCount { - maxCount = scores[i].Score - } - } - - if maxCount == 0 { - if reverse { - for i := range scores { - scores[i].Score = maxPriority - } - } - return nil - } - - for i := range scores { - score := scores[i].Score - - score = maxPriority * score / maxCount - if reverse { - score = maxPriority - score - } - - scores[i].Score = score - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/shape_score.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/shape_score.go deleted file mode 100644 index f12488b14c8..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/shape_score.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -// FunctionShape represents a collection of FunctionShapePoint. 
-type FunctionShape []FunctionShapePoint - -// FunctionShapePoint represents a shape point. -type FunctionShapePoint struct { - // Utilization is function argument. - Utilization int64 - // Score is function value. - Score int64 -} - -// BuildBrokenLinearFunction creates a function which is built using linear segments. Segments are defined via shape array. -// Shape[i].Utilization slice represents points on "Utilization" axis where different segments meet. -// Shape[i].Score represents function values at meeting points. -// -// function f(p) is defined as: -// -// shape[0].Score for p < shape[0].Utilization -// shape[n-1].Score for p > shape[n-1].Utilization -// -// and linear between points (p < shape[i].Utilization) -func BuildBrokenLinearFunction(shape FunctionShape) func(int64) int64 { - return func(p int64) int64 { - for i := 0; i < len(shape); i++ { - if p <= int64(shape[i].Utilization) { - if i == 0 { - return shape[0].Score - } - return shape[i-1].Score + (shape[i].Score-shape[i-1].Score)*(p-shape[i-1].Utilization)/(shape[i].Utilization-shape[i-1].Utilization) - } - } - return shape[len(shape)-1].Score - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/spread.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/spread.go deleted file mode 100644 index 636bcaee4cc..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/spread.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import ( - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - appslisters "k8s.io/client-go/listers/apps/v1" - corelisters "k8s.io/client-go/listers/core/v1" -) - -var ( - rcKind = v1.SchemeGroupVersion.WithKind("ReplicationController") - rsKind = appsv1.SchemeGroupVersion.WithKind("ReplicaSet") - ssKind = appsv1.SchemeGroupVersion.WithKind("StatefulSet") -) - -// DefaultSelector returns a selector deduced from the Services, Replication -// Controllers, Replica Sets, and Stateful Sets matching the given pod. -func DefaultSelector( - pod *v1.Pod, - sl corelisters.ServiceLister, - cl corelisters.ReplicationControllerLister, - rsl appslisters.ReplicaSetLister, - ssl appslisters.StatefulSetLister, -) labels.Selector { - labelSet := make(labels.Set) - // Since services, RCs, RSs and SSs match the pod, they won't have conflicting - // labels. Merging is safe. 
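BuildBrokenLinearFunction is self-contained enough to demonstrate directly. The sketch below copies its interpolation logic under local names (point and brokenLinear are invented for the example) and evaluates a shape that falls linearly from score 10 at utilization 0 to score 0 at 100:

    package main

    import "fmt"

    type point struct{ utilization, score int64 }

    // brokenLinear reproduces the piecewise-linear interpolation of
    // BuildBrokenLinearFunction for this example only.
    func brokenLinear(shape []point) func(int64) int64 {
    	return func(p int64) int64 {
    		for i := 0; i < len(shape); i++ {
    			if p <= shape[i].utilization {
    				if i == 0 {
    					return shape[0].score
    				}
    				return shape[i-1].score + (shape[i].score-shape[i-1].score)*(p-shape[i-1].utilization)/(shape[i].utilization-shape[i-1].utilization)
    			}
    		}
    		return shape[len(shape)-1].score
    	}
    }

    func main() {
    	f := brokenLinear([]point{{0, 10}, {100, 0}})
    	for _, u := range []int64{0, 25, 50, 100, 120} {
    		fmt.Printf("utilization=%d -> score=%d\n", u, f(u))
    	}
    	// Prints 10, 8, 5, 0, 0: integer division truncates the
    	// interpolated values, and inputs past the last point clamp.
    }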
- - if services, err := GetPodServices(sl, pod); err == nil { - for _, service := range services { - labelSet = labels.Merge(labelSet, service.Spec.Selector) - } - } - selector := labelSet.AsSelector() - - owner := metav1.GetControllerOfNoCopy(pod) - if owner == nil { - return selector - } - - gv, err := schema.ParseGroupVersion(owner.APIVersion) - if err != nil { - return selector - } - - gvk := gv.WithKind(owner.Kind) - switch gvk { - case rcKind: - if rc, err := cl.ReplicationControllers(pod.Namespace).Get(owner.Name); err == nil { - labelSet = labels.Merge(labelSet, rc.Spec.Selector) - selector = labelSet.AsSelector() - } - case rsKind: - if rs, err := rsl.ReplicaSets(pod.Namespace).Get(owner.Name); err == nil { - if other, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { - if r, ok := other.Requirements(); ok { - selector = selector.Add(r...) - } - } - } - case ssKind: - if ss, err := ssl.StatefulSets(pod.Namespace).Get(owner.Name); err == nil { - if other, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { - if r, ok := other.Requirements(); ok { - selector = selector.Add(r...) - } - } - } - default: - // Not owned by a supported controller. - } - - return selector -} - -// GetPodServices gets the services that have the selector that match the labels on the given pod. -func GetPodServices(sl corelisters.ServiceLister, pod *v1.Pod) ([]*v1.Service, error) { - allServices, err := sl.Services(pod.Namespace).List(labels.Everything()) - if err != nil { - return nil, err - } - - var services []*v1.Service - for i := range allServices { - service := allServices[i] - if service.Spec.Selector == nil { - // services with nil selectors match nothing, not everything. - continue - } - selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() - if selector.Matches(labels.Set(pod.Labels)) { - services = append(services, service) - } - } - - return services, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/taint.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/taint.go deleted file mode 100644 index 7502d9ce44f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper/taint.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import v1 "k8s.io/api/core/v1" - -// DoNotScheduleTaintsFilterFunc returns the filter function that can -// filter out the node taints that reject scheduling Pod on a Node. -func DoNotScheduleTaintsFilterFunc() func(t *v1.Taint) bool { - return func(t *v1.Taint) bool { - // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints. 
- return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/image_locality.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/image_locality.go deleted file mode 100644 index 694d457c754..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality/image_locality.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package imagelocality - -import ( - "context" - "strings" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" -) - -// The two thresholds are used as bounds for the image score range. They correspond to a reasonable size range for -// container images compressed and stored in registries; 90%ile of images on dockerhub drops into this range. -const ( - mb int64 = 1024 * 1024 - minThreshold int64 = 23 * mb - maxContainerThreshold int64 = 1000 * mb -) - -// ImageLocality is a score plugin that favors nodes that already have requested pod container's images. -type ImageLocality struct { - handle framework.Handle -} - -var _ framework.ScorePlugin = &ImageLocality{} - -// Name is the name of the plugin used in the plugin registry and configurations. -const Name = names.ImageLocality - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *ImageLocality) Name() string { - return Name -} - -// Score invoked at the score extension point. -func (pl *ImageLocality) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - nodeInfos, err := pl.handle.SnapshotSharedLister().NodeInfos().List() - if err != nil { - return 0, framework.AsStatus(err) - } - totalNumNodes := len(nodeInfos) - - imageScores := sumImageScores(nodeInfo, pod, totalNumNodes) - score := calculatePriority(imageScores, len(pod.Spec.InitContainers)+len(pod.Spec.Containers)) - - return score, nil -} - -// ScoreExtensions of the Score plugin. -func (pl *ImageLocality) ScoreExtensions() framework.ScoreExtensions { - return nil -} - -// New initializes a new plugin and returns it. -func New(_ context.Context, _ runtime.Object, h framework.Handle) (framework.Plugin, error) { - return &ImageLocality{handle: h}, nil -} - -// calculatePriority returns the priority of a node. Given the sumScores of requested images on the node, the node's -// priority is obtained by scaling the maximum priority value with a ratio proportional to the sumScores. 
-func calculatePriority(sumScores int64, numContainers int) int64 { - maxThreshold := maxContainerThreshold * int64(numContainers) - if sumScores < minThreshold { - sumScores = minThreshold - } else if sumScores > maxThreshold { - sumScores = maxThreshold - } - - return framework.MaxNodeScore * (sumScores - minThreshold) / (maxThreshold - minThreshold) -} - -// sumImageScores returns the sum of image scores of all the containers that are already on the node. -// Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate -// the final score. -func sumImageScores(nodeInfo *framework.NodeInfo, pod *v1.Pod, totalNumNodes int) int64 { - var sum int64 - for _, container := range pod.Spec.InitContainers { - if state, ok := nodeInfo.ImageStates[normalizedImageName(container.Image)]; ok { - sum += scaledImageScore(state, totalNumNodes) - } - } - for _, container := range pod.Spec.Containers { - if state, ok := nodeInfo.ImageStates[normalizedImageName(container.Image)]; ok { - sum += scaledImageScore(state, totalNumNodes) - } - } - return sum -} - -// scaledImageScore returns an adaptively scaled score for the given state of an image. -// The size of the image is used as the base score, scaled by a factor which considers how much nodes the image has "spread" to. -// This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or -// a few nodes due to image locality. -func scaledImageScore(imageState *framework.ImageStateSummary, totalNumNodes int) int64 { - spread := float64(imageState.NumNodes) / float64(totalNumNodes) - return int64(float64(imageState.Size) * spread) -} - -// normalizedImageName returns the CRI compliant name for a given image. -// TODO: cover the corner cases of missed matches, e.g, -// 1. Using Docker as runtime and docker.io/library/test:tag in pod spec, but only test:tag will present in node status -// 2. Using the implicit registry, i.e., test:tag or library/test:tag in pod spec but only docker.io/library/test:tag -// in node status; note that if users consistently use one registry format, this should not happen. -func normalizedImageName(name string) string { - if strings.LastIndex(name, ":") <= strings.LastIndex(name, "/") { - name = name + ":latest" - } - return name -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go deleted file mode 100644 index 6d5106f02e2..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go +++ /dev/null @@ -1,435 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package interpodaffinity
-
-import (
-	"context"
-	"fmt"
-	"sync/atomic"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-)
-
-const (
-	// preFilterStateKey is the key in CycleState to InterPodAffinity pre-computed data for Filtering.
-	// Using the name of the plugin will likely help us avoid collisions with other plugins.
-	preFilterStateKey = "PreFilter" + Name
-
-	// ErrReasonExistingAntiAffinityRulesNotMatch is used for ExistingPodsAntiAffinityRulesNotMatch predicate error.
-	ErrReasonExistingAntiAffinityRulesNotMatch = "node(s) didn't satisfy existing pods anti-affinity rules"
-	// ErrReasonAffinityRulesNotMatch is used for PodAffinityRulesNotMatch predicate error.
-	ErrReasonAffinityRulesNotMatch = "node(s) didn't match pod affinity rules"
-	// ErrReasonAntiAffinityRulesNotMatch is used for PodAntiAffinityRulesNotMatch predicate error.
-	ErrReasonAntiAffinityRulesNotMatch = "node(s) didn't match pod anti-affinity rules"
-)
-
-// preFilterState computed at PreFilter and used at Filter.
-type preFilterState struct {
-	// A map of topology pairs to the number of existing pods that has anti-affinity terms that match the "pod".
-	existingAntiAffinityCounts topologyToMatchedTermCount
-	// A map of topology pairs to the number of existing pods that match the affinity terms of the "pod".
-	affinityCounts topologyToMatchedTermCount
-	// A map of topology pairs to the number of existing pods that match the anti-affinity terms of the "pod".
-	antiAffinityCounts topologyToMatchedTermCount
-	// podInfo of the incoming pod.
-	podInfo *framework.PodInfo
-	// A copy of the incoming pod's namespace labels.
-	namespaceLabels labels.Set
-}
-
-// Clone the prefilter state.
-func (s *preFilterState) Clone() framework.StateData {
-	if s == nil {
-		return nil
-	}
-
-	copy := preFilterState{}
-	copy.affinityCounts = s.affinityCounts.clone()
-	copy.antiAffinityCounts = s.antiAffinityCounts.clone()
-	copy.existingAntiAffinityCounts = s.existingAntiAffinityCounts.clone()
-	// No need to deep copy the podInfo because it shouldn't change.
-	copy.podInfo = s.podInfo
-	copy.namespaceLabels = s.namespaceLabels
-	return &copy
-}
-
-// updateWithPod updates the preFilterState counters with the (anti)affinity matches for the given podInfo.
-func (s *preFilterState) updateWithPod(pInfo *framework.PodInfo, node *v1.Node, multiplier int64) {
-	if s == nil {
-		return
-	}
-
-	s.existingAntiAffinityCounts.updateWithAntiAffinityTerms(pInfo.RequiredAntiAffinityTerms, s.podInfo.Pod, s.namespaceLabels, node, multiplier)
-	s.affinityCounts.updateWithAffinityTerms(s.podInfo.RequiredAffinityTerms, pInfo.Pod, node, multiplier)
-	// The incoming pod's terms have the namespaceSelector merged into the namespaces, and so
-	// here we don't lookup the updated pod's namespace labels, hence passing nil for nsLabels.
- s.antiAffinityCounts.updateWithAntiAffinityTerms(s.podInfo.RequiredAntiAffinityTerms, pInfo.Pod, nil, node, multiplier) -} - -type topologyPair struct { - key string - value string -} -type topologyToMatchedTermCount map[topologyPair]int64 - -func (m topologyToMatchedTermCount) merge(toMerge topologyToMatchedTermCount) { - for pair, count := range toMerge { - m[pair] += count - } -} - -func (m topologyToMatchedTermCount) mergeWithList(toMerge topologyToMatchedTermCountList) { - for _, tmtc := range toMerge { - m[tmtc.topologyPair] += tmtc.count - } -} - -func (m topologyToMatchedTermCount) clone() topologyToMatchedTermCount { - copy := make(topologyToMatchedTermCount, len(m)) - copy.merge(m) - return copy -} - -func (m topologyToMatchedTermCount) update(node *v1.Node, tk string, value int64) { - if tv, ok := node.Labels[tk]; ok { - pair := topologyPair{key: tk, value: tv} - m[pair] += value - // value could be negative, hence we delete the entry if it is down to zero. - if m[pair] == 0 { - delete(m, pair) - } - } -} - -// updates the topologyToMatchedTermCount map with the specified value -// for each affinity term if "targetPod" matches ALL terms. -func (m topologyToMatchedTermCount) updateWithAffinityTerms( - terms []framework.AffinityTerm, pod *v1.Pod, node *v1.Node, value int64) { - if podMatchesAllAffinityTerms(terms, pod) { - for _, t := range terms { - m.update(node, t.TopologyKey, value) - } - } -} - -// updates the topologyToMatchedTermCount map with the specified value -// for each anti-affinity term matched the target pod. -func (m topologyToMatchedTermCount) updateWithAntiAffinityTerms(terms []framework.AffinityTerm, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, value int64) { - // Check anti-affinity terms. - for _, t := range terms { - if t.Matches(pod, nsLabels) { - m.update(node, t.TopologyKey, value) - } - } -} - -// topologyToMatchedTermCountList is a slice equivalent of topologyToMatchedTermCount map. -// The use of slice improves the performance of PreFilter, -// especially due to faster iteration when merging than with topologyToMatchedTermCount. -type topologyToMatchedTermCountList []topologyPairCount - -type topologyPairCount struct { - topologyPair topologyPair - count int64 -} - -func (m *topologyToMatchedTermCountList) append(node *v1.Node, tk string, value int64) { - if tv, ok := node.Labels[tk]; ok { - pair := topologyPair{key: tk, value: tv} - *m = append(*m, topologyPairCount{ - topologyPair: pair, - count: value, - }) - } -} - -// appends the specified value to the topologyToMatchedTermCountList -// for each affinity term if "targetPod" matches ALL terms. -func (m *topologyToMatchedTermCountList) appendWithAffinityTerms( - terms []framework.AffinityTerm, pod *v1.Pod, node *v1.Node, value int64) { - if podMatchesAllAffinityTerms(terms, pod) { - for _, t := range terms { - m.append(node, t.TopologyKey, value) - } - } -} - -// appends the specified value to the topologyToMatchedTermCountList -// for each anti-affinity term matched the target pod. -func (m *topologyToMatchedTermCountList) appendWithAntiAffinityTerms(terms []framework.AffinityTerm, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, value int64) { - // Check anti-affinity terms. - for _, t := range terms { - if t.Matches(pod, nsLabels) { - m.append(node, t.TopologyKey, value) - } - } -} - -// returns true IFF the given pod matches all the given terms. 
-func podMatchesAllAffinityTerms(terms []framework.AffinityTerm, pod *v1.Pod) bool { - if len(terms) == 0 { - return false - } - for _, t := range terms { - // The incoming pod NamespaceSelector was merged into the Namespaces set, and so - // we are not explicitly passing in namespace labels. - if !t.Matches(pod, nil) { - return false - } - } - return true -} - -// calculates the following for each existing pod on each node: -// 1. Whether it has PodAntiAffinity -// 2. Whether any AntiAffinityTerm matches the incoming pod -func (pl *InterPodAffinity) getExistingAntiAffinityCounts(ctx context.Context, pod *v1.Pod, nsLabels labels.Set, nodes []*framework.NodeInfo) topologyToMatchedTermCount { - antiAffinityCountsList := make([]topologyToMatchedTermCountList, len(nodes)) - index := int32(-1) - processNode := func(i int) { - nodeInfo := nodes[i] - node := nodeInfo.Node() - - antiAffinityCounts := make(topologyToMatchedTermCountList, 0) - for _, existingPod := range nodeInfo.PodsWithRequiredAntiAffinity { - antiAffinityCounts.appendWithAntiAffinityTerms(existingPod.RequiredAntiAffinityTerms, pod, nsLabels, node, 1) - } - if len(antiAffinityCounts) != 0 { - antiAffinityCountsList[atomic.AddInt32(&index, 1)] = antiAffinityCounts - } - } - pl.parallelizer.Until(ctx, len(nodes), processNode, pl.Name()) - - result := make(topologyToMatchedTermCount) - // Traditional for loop is slightly faster in this case than its "for range" equivalent. - for i := 0; i <= int(index); i++ { - result.mergeWithList(antiAffinityCountsList[i]) - } - - return result -} - -// finds existing Pods that match affinity terms of the incoming pod's (anti)affinity terms. -// It returns a topologyToMatchedTermCount that are checked later by the affinity -// predicate. With this topologyToMatchedTermCount available, the affinity predicate does not -// need to check all the pods in the cluster. -func (pl *InterPodAffinity) getIncomingAffinityAntiAffinityCounts(ctx context.Context, podInfo *framework.PodInfo, allNodes []*framework.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount) { - affinityCounts := make(topologyToMatchedTermCount) - antiAffinityCounts := make(topologyToMatchedTermCount) - if len(podInfo.RequiredAffinityTerms) == 0 && len(podInfo.RequiredAntiAffinityTerms) == 0 { - return affinityCounts, antiAffinityCounts - } - - affinityCountsList := make([]topologyToMatchedTermCountList, len(allNodes)) - antiAffinityCountsList := make([]topologyToMatchedTermCountList, len(allNodes)) - index := int32(-1) - processNode := func(i int) { - nodeInfo := allNodes[i] - node := nodeInfo.Node() - - affinity := make(topologyToMatchedTermCountList, 0) - antiAffinity := make(topologyToMatchedTermCountList, 0) - for _, existingPod := range nodeInfo.Pods { - affinity.appendWithAffinityTerms(podInfo.RequiredAffinityTerms, existingPod.Pod, node, 1) - // The incoming pod's terms have the namespaceSelector merged into the namespaces, and so - // here we don't lookup the existing pod's namespace labels, hence passing nil for nsLabels. 
- antiAffinity.appendWithAntiAffinityTerms(podInfo.RequiredAntiAffinityTerms, existingPod.Pod, nil, node, 1) - } - - if len(affinity) > 0 || len(antiAffinity) > 0 { - k := atomic.AddInt32(&index, 1) - affinityCountsList[k] = affinity - antiAffinityCountsList[k] = antiAffinity - } - } - pl.parallelizer.Until(ctx, len(allNodes), processNode, pl.Name()) - - for i := 0; i <= int(index); i++ { - affinityCounts.mergeWithList(affinityCountsList[i]) - antiAffinityCounts.mergeWithList(antiAffinityCountsList[i]) - } - - return affinityCounts, antiAffinityCounts -} - -// PreFilter invoked at the prefilter extension point. -func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - var allNodes []*framework.NodeInfo - var nodesWithRequiredAntiAffinityPods []*framework.NodeInfo - var err error - if allNodes, err = pl.sharedLister.NodeInfos().List(); err != nil { - return nil, framework.AsStatus(fmt.Errorf("failed to list NodeInfos: %w", err)) - } - if nodesWithRequiredAntiAffinityPods, err = pl.sharedLister.NodeInfos().HavePodsWithRequiredAntiAffinityList(); err != nil { - return nil, framework.AsStatus(fmt.Errorf("failed to list NodeInfos with pods with affinity: %w", err)) - } - - s := &preFilterState{} - - if s.podInfo, err = framework.NewPodInfo(pod); err != nil { - return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("parsing pod: %+v", err)) - } - - for i := range s.podInfo.RequiredAffinityTerms { - if err := pl.mergeAffinityTermNamespacesIfNotEmpty(&s.podInfo.RequiredAffinityTerms[i]); err != nil { - return nil, framework.AsStatus(err) - } - } - for i := range s.podInfo.RequiredAntiAffinityTerms { - if err := pl.mergeAffinityTermNamespacesIfNotEmpty(&s.podInfo.RequiredAntiAffinityTerms[i]); err != nil { - return nil, framework.AsStatus(err) - } - } - logger := klog.FromContext(ctx) - s.namespaceLabels = GetNamespaceLabelsSnapshot(logger, pod.Namespace, pl.nsLister) - - s.existingAntiAffinityCounts = pl.getExistingAntiAffinityCounts(ctx, pod, s.namespaceLabels, nodesWithRequiredAntiAffinityPods) - s.affinityCounts, s.antiAffinityCounts = pl.getIncomingAffinityAntiAffinityCounts(ctx, s.podInfo, allNodes) - - if len(s.existingAntiAffinityCounts) == 0 && len(s.podInfo.RequiredAffinityTerms) == 0 && len(s.podInfo.RequiredAntiAffinityTerms) == 0 { - return nil, framework.NewStatus(framework.Skip) - } - - cycleState.Write(preFilterStateKey, s) - return nil, nil -} - -// PreFilterExtensions returns prefilter extensions, pod add and remove. -func (pl *InterPodAffinity) PreFilterExtensions() framework.PreFilterExtensions { - return pl -} - -// AddPod from pre-computed data in cycleState. -func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - state, err := getPreFilterState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - state.updateWithPod(podInfoToAdd, nodeInfo.Node(), 1) - return nil -} - -// RemovePod from pre-computed data in cycleState. 
-func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - state, err := getPreFilterState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - state.updateWithPod(podInfoToRemove, nodeInfo.Node(), -1) - return nil -} - -func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { - c, err := cycleState.Read(preFilterStateKey) - if err != nil { - // preFilterState doesn't exist, likely PreFilter wasn't invoked. - return nil, fmt.Errorf("error reading %q from cycleState: %w", preFilterStateKey, err) - } - - s, ok := c.(*preFilterState) - if !ok { - return nil, fmt.Errorf("%+v convert to interpodaffinity.state error", c) - } - return s, nil -} - -// Checks if scheduling the pod onto this node would break any anti-affinity -// terms indicated by the existing pods. -func satisfyExistingPodsAntiAffinity(state *preFilterState, nodeInfo *framework.NodeInfo) bool { - if len(state.existingAntiAffinityCounts) > 0 { - // Iterate over topology pairs to get any of the pods being affected by - // the scheduled pod anti-affinity terms - for topologyKey, topologyValue := range nodeInfo.Node().Labels { - tp := topologyPair{key: topologyKey, value: topologyValue} - if state.existingAntiAffinityCounts[tp] > 0 { - return false - } - } - } - return true -} - -// Checks if the node satisfies the incoming pod's anti-affinity rules. -func satisfyPodAntiAffinity(state *preFilterState, nodeInfo *framework.NodeInfo) bool { - if len(state.antiAffinityCounts) > 0 { - for _, term := range state.podInfo.RequiredAntiAffinityTerms { - if topologyValue, ok := nodeInfo.Node().Labels[term.TopologyKey]; ok { - tp := topologyPair{key: term.TopologyKey, value: topologyValue} - if state.antiAffinityCounts[tp] > 0 { - return false - } - } - } - } - return true -} - -// Checks if the node satisfies the incoming pod's affinity rules. -func satisfyPodAffinity(state *preFilterState, nodeInfo *framework.NodeInfo) bool { - podsExist := true - for _, term := range state.podInfo.RequiredAffinityTerms { - if topologyValue, ok := nodeInfo.Node().Labels[term.TopologyKey]; ok { - tp := topologyPair{key: term.TopologyKey, value: topologyValue} - if state.affinityCounts[tp] <= 0 { - podsExist = false - } - } else { - // All topology labels must exist on the node. - return false - } - } - - if !podsExist { - // This pod may be the first pod in a series that have affinity to themselves. In order - // to not leave such pods in pending state forever, we check that if no other pod - // in the cluster matches the namespace and selector of this pod, the pod matches - // its own terms, and the node has all the requested topologies, then we allow the pod - // to pass the affinity check. - if len(state.affinityCounts) == 0 && podMatchesAllAffinityTerms(state.podInfo.RequiredAffinityTerms, state.podInfo.Pod) { - return true - } - return false - } - return true -} - -// Filter invoked at the filter extension point. -// It checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration. 
-func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - - state, err := getPreFilterState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - - if !satisfyPodAffinity(state, nodeInfo) { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonAffinityRulesNotMatch) - } - - if !satisfyPodAntiAffinity(state, nodeInfo) { - return framework.NewStatus(framework.Unschedulable, ErrReasonAntiAffinityRulesNotMatch) - } - - if !satisfyExistingPodsAntiAffinity(state, nodeInfo) { - return framework.NewStatus(framework.Unschedulable, ErrReasonExistingAntiAffinityRulesNotMatch) - } - - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go deleted file mode 100644 index 78d6d65e824..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go +++ /dev/null @@ -1,299 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package interpodaffinity - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - listersv1 "k8s.io/client-go/listers/core/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// Name is the name of the plugin used in the plugin registry and configurations. -const Name = names.InterPodAffinity - -var _ framework.PreFilterPlugin = &InterPodAffinity{} -var _ framework.FilterPlugin = &InterPodAffinity{} -var _ framework.PreScorePlugin = &InterPodAffinity{} -var _ framework.ScorePlugin = &InterPodAffinity{} -var _ framework.EnqueueExtensions = &InterPodAffinity{} - -// InterPodAffinity is a plugin that checks inter pod affinity -type InterPodAffinity struct { - parallelizer parallelize.Parallelizer - args config.InterPodAffinityArgs - sharedLister framework.SharedLister - nsLister listersv1.NamespaceLister - enableSchedulingQueueHint bool -} - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *InterPodAffinity) Name() string { - return Name -} - -// EventsToRegister returns the possible events that may make a failed Pod -// schedulable -func (pl *InterPodAffinity) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - // A note about UpdateNodeTaint event: - // Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin. 
- // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add. - // See: https://github.com/kubernetes/kubernetes/issues/109437 - nodeActionType := framework.Add | framework.UpdateNodeLabel | framework.UpdateNodeTaint - if pl.enableSchedulingQueueHint { - // When QueueingHint is enabled, we don't use preCheck and we don't need to register UpdateNodeTaint event. - nodeActionType = framework.Add | framework.UpdateNodeLabel - } - return []framework.ClusterEventWithHint{ - // All ActionType includes the following events: - // - Delete. An unschedulable Pod may fail due to violating an existing Pod's anti-affinity constraints, - // deleting an existing Pod may make it schedulable. - // - UpdatePodLabel. Updating on an existing Pod's labels (e.g., removal) may make - // an unschedulable Pod schedulable. - // - Add. An unschedulable Pod may fail due to violating pod-affinity constraints, - // adding an assigned Pod may make it schedulable. - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Add | framework.UpdatePodLabel | framework.Delete}, QueueingHintFn: pl.isSchedulableAfterPodChange}, - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}, QueueingHintFn: pl.isSchedulableAfterNodeChange}, - }, nil -} - -// New initializes a new plugin and returns it. -func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { - if h.SnapshotSharedLister() == nil { - return nil, fmt.Errorf("SnapshotSharedlister is nil") - } - args, err := getArgs(plArgs) - if err != nil { - return nil, err - } - if err := validation.ValidateInterPodAffinityArgs(nil, &args); err != nil { - return nil, err - } - pl := &InterPodAffinity{ - parallelizer: h.Parallelizer(), - args: args, - sharedLister: h.SnapshotSharedLister(), - nsLister: h.SharedInformerFactory().Core().V1().Namespaces().Lister(), - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - } - - return pl, nil -} - -func getArgs(obj runtime.Object) (config.InterPodAffinityArgs, error) { - ptr, ok := obj.(*config.InterPodAffinityArgs) - if !ok { - return config.InterPodAffinityArgs{}, fmt.Errorf("want args to be of type InterPodAffinityArgs, got %T", obj) - } - return *ptr, nil -} - -// Updates Namespaces with the set of namespaces identified by NamespaceSelector. -// If successful, NamespaceSelector is set to nil. -// The assumption is that the term is for an incoming pod, in which case -// namespaceSelector is either unrolled into Namespaces (and so the selector -// is set to Nothing()) or is Empty(), which means match everything. Therefore, -// there when matching against this term, there is no need to lookup the existing -// pod's namespace labels to match them against term's namespaceSelector explicitly. -func (pl *InterPodAffinity) mergeAffinityTermNamespacesIfNotEmpty(at *framework.AffinityTerm) error { - if at.NamespaceSelector.Empty() { - return nil - } - ns, err := pl.nsLister.List(at.NamespaceSelector) - if err != nil { - return err - } - for _, n := range ns { - at.Namespaces.Insert(n.Name) - } - at.NamespaceSelector = labels.Nothing() - return nil -} - -// GetNamespaceLabelsSnapshot returns a snapshot of the labels associated with -// the namespace. 
-func GetNamespaceLabelsSnapshot(logger klog.Logger, ns string, nsLister listersv1.NamespaceLister) (nsLabels labels.Set) { - podNS, err := nsLister.Get(ns) - if err == nil { - // Create and return snapshot of the labels. - return labels.Merge(podNS.Labels, nil) - } - logger.V(3).Info("getting namespace, assuming empty set of namespace labels", "namespace", ns, "err", err) - return -} - -func (pl *InterPodAffinity) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalPod, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - if (modifiedPod != nil && modifiedPod.Spec.NodeName == "") || (originalPod != nil && originalPod.Spec.NodeName == "") { - logger.V(5).Info("the added/updated/deleted pod is unscheduled, so it doesn't make the target pod schedulable", - "pod", klog.KObj(pod), "originalPod", klog.KObj(originalPod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - - terms, err := framework.GetAffinityTerms(pod, framework.GetPodAffinityTerms(pod.Spec.Affinity)) - if err != nil { - return framework.Queue, err - } - - antiTerms, err := framework.GetAffinityTerms(pod, framework.GetPodAntiAffinityTerms(pod.Spec.Affinity)) - if err != nil { - return framework.Queue, err - } - - // Pod is updated. Return Queue when the updated pod matching the target pod's affinity or not matching anti-affinity. - // Note that, we don't need to check each affinity individually when the Pod has more than one affinity - // because the current PodAffinity looks for a **single** existing pod that can satisfy **all** the terms of inter-pod affinity of an incoming pod. - if modifiedPod != nil && originalPod != nil { - if !podMatchesAllAffinityTerms(terms, originalPod) && podMatchesAllAffinityTerms(terms, modifiedPod) { - logger.V(5).Info("a scheduled pod was updated to match the target pod's affinity, and the pod may be schedulable now", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.Queue, nil - } - if podMatchesAllAffinityTerms(antiTerms, originalPod) && !podMatchesAllAffinityTerms(antiTerms, modifiedPod) { - logger.V(5).Info("a scheduled pod was updated not to match the target pod's anti affinity, and the pod may be schedulable now", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.Queue, nil - } - logger.V(5).Info("a scheduled pod was updated but it doesn't match the target pod's affinity or does match the target pod's anti-affinity", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - - // Pod is added. Return Queue when the added pod matching the target pod's affinity. - if modifiedPod != nil { - if podMatchesAllAffinityTerms(terms, modifiedPod) { - logger.V(5).Info("a scheduled pod was added and it matches the target pod's affinity", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.Queue, nil - } - logger.V(5).Info("a scheduled pod was added and it doesn't match the target pod's affinity", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - - // Pod is deleted. Return Queue when the deleted pod matching the target pod's anti-affinity. 
- if !podMatchesAllAffinityTerms(antiTerms, originalPod) { - logger.V(5).Info("a scheduled pod was deleted but it doesn't match the target pod's anti-affinity", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - logger.V(5).Info("a scheduled pod was deleted and it matches the target pod's anti-affinity. The pod may be schedulable now", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.Queue, nil -} - -func (pl *InterPodAffinity) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - terms, err := framework.GetAffinityTerms(pod, framework.GetPodAffinityTerms(pod.Spec.Affinity)) - if err != nil { - return framework.Queue, err - } - - // When queuing this Pod: - // - 1. A new node is added with the pod affinity topologyKey, the pod may become schedulable. - // - 2. The original node does not have the pod affinity topologyKey but the modified node does, the pod may become schedulable. - // - 3. Both the original and modified nodes have the pod affinity topologyKey and they differ, the pod may become schedulable. - for _, term := range terms { - if originalNode == nil { - if _, ok := modifiedNode.Labels[term.TopologyKey]; ok { - // Case 1: A new node is added with the pod affinity topologyKey. - logger.V(5).Info("A node with a matched pod affinity topologyKey was added and it may make the pod schedulable", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - continue - } - originalTopologyValue, originalHasKey := originalNode.Labels[term.TopologyKey] - modifiedTopologyValue, modifiedHasKey := modifiedNode.Labels[term.TopologyKey] - - if !originalHasKey && modifiedHasKey { - // Case 2: Original node does not have the pod affinity topologyKey, but the modified node does. - logger.V(5).Info("A node got updated to have the topology key of pod affinity, which may make the pod schedulable", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - - if originalHasKey && modifiedHasKey && (originalTopologyValue != modifiedTopologyValue) { - // Case 3: Both nodes have the pod affinity topologyKey, but the values differ. - logger.V(5).Info("A node is moved to a different domain of pod affinity, which may make the pod schedulable", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - } - - antiTerms, err := framework.GetAffinityTerms(pod, framework.GetPodAntiAffinityTerms(pod.Spec.Affinity)) - if err != nil { - return framework.Queue, err - } - - // When queuing this Pod: - // - 1. A new node is added, the pod may become schedulable. - // - 2. The original node have the pod anti-affinity topologyKey but the modified node does not, the pod may become schedulable. - // - 3. Both the original and modified nodes have the pod anti-affinity topologyKey and they differ, the pod may become schedulable. - for _, term := range antiTerms { - if originalNode == nil { - // Case 1: A new node is added. - // We always requeue the Pod with anti-affinity because: - // - the node without the topology key is always allowed to have a Pod with anti-affinity. - // - the addition of a node with the topology key makes Pods schedulable only when the topology it joins doesn't have any Pods that the Pod hates. 
- // But, it's out-of-scope of this QHint to check which Pods are in the topology this Node is in. - logger.V(5).Info("A node was added and it may make the pod schedulable", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - - originalTopologyValue, originalHasKey := originalNode.Labels[term.TopologyKey] - modifiedTopologyValue, modifiedHasKey := modifiedNode.Labels[term.TopologyKey] - - if originalHasKey && !modifiedHasKey { - // Case 2: The original node have the pod anti-affinity topologyKey but the modified node does not. - // Note that we don't need to check the opposite case (!originalHasKey && modifiedHasKey) - // because the node without the topology label can always accept pods with pod anti-affinity. - logger.V(5).Info("A node got updated to not have the topology key of pod anti-affinity, which may make the pod schedulable", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - - if originalHasKey && modifiedHasKey && (originalTopologyValue != modifiedTopologyValue) { - // Case 3: Both nodes have the pod anti-affinity topologyKey, but the values differ. - logger.V(5).Info("A node is moved to a different domain of pod anti-affinity, which may make the pod schedulable", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - } - logger.V(5).Info("a node is added/updated but doesn't have any topologyKey which matches pod affinity/anti-affinity", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go deleted file mode 100644 index 126f54a472b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go +++ /dev/null @@ -1,294 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package interpodaffinity - -import ( - "context" - "fmt" - "math" - "sync/atomic" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// preScoreStateKey is the key in CycleState to InterPodAffinity pre-computed data for Scoring. -const preScoreStateKey = "PreScore" + Name - -type scoreMap map[string]map[string]int64 - -// preScoreState computed at PreScore and used at Score. -type preScoreState struct { - topologyScore scoreMap - podInfo *framework.PodInfo - // A copy of the incoming pod's namespace labels. - namespaceLabels labels.Set -} - -// Clone implements the mandatory Clone interface. We don't really copy the data since -// there is no need for that. 
-func (s *preScoreState) Clone() framework.StateData {
-	return s
-}
-
-func (m scoreMap) processTerm(term *framework.AffinityTerm, weight int32, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, multiplier int32) {
-	if term.Matches(pod, nsLabels) {
-		if tpValue, tpValueExist := node.Labels[term.TopologyKey]; tpValueExist {
-			if m[term.TopologyKey] == nil {
-				m[term.TopologyKey] = make(map[string]int64)
-			}
-			m[term.TopologyKey][tpValue] += int64(weight * multiplier)
-		}
-	}
-}
-
-func (m scoreMap) processTerms(terms []framework.WeightedAffinityTerm, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, multiplier int32) {
-	for _, term := range terms {
-		m.processTerm(&term.AffinityTerm, term.Weight, pod, nsLabels, node, multiplier)
-	}
-}
-
-func (m scoreMap) append(other scoreMap) {
-	for topology, oScores := range other {
-		scores := m[topology]
-		if scores == nil {
-			m[topology] = oScores
-			continue
-		}
-		for k, v := range oScores {
-			scores[k] += v
-		}
-	}
-}
-
-func (pl *InterPodAffinity) processExistingPod(
-	state *preScoreState,
-	existingPod *framework.PodInfo,
-	existingPodNodeInfo *framework.NodeInfo,
-	incomingPod *v1.Pod,
-	topoScore scoreMap,
-) {
-	existingPodNode := existingPodNodeInfo.Node()
-	if len(existingPodNode.Labels) == 0 {
-		return
-	}
-
-	// For every soft pod affinity term of <pod>, if <existingPod> matches the term,
-	// increment <p.counts> for every node in the cluster with the same <term.TopologyKey>
-	// value as that of <existingPod>`s node by the term`s weight.
-	// Note that the incoming pod's terms have the namespaceSelector merged into the namespaces, and so
-	// here we don't lookup the existing pod's namespace labels, hence passing nil for nsLabels.
-	topoScore.processTerms(state.podInfo.PreferredAffinityTerms, existingPod.Pod, nil, existingPodNode, 1)
-
-	// For every soft pod anti-affinity term of <pod>, if <existingPod> matches the term,
-	// decrement <p.counts> for every node in the cluster with the same <term.TopologyKey>
-	// value as that of <existingPod>`s node by the term`s weight.
-	// Note that the incoming pod's terms have the namespaceSelector merged into the namespaces, and so
-	// here we don't lookup the existing pod's namespace labels, hence passing nil for nsLabels.
-	topoScore.processTerms(state.podInfo.PreferredAntiAffinityTerms, existingPod.Pod, nil, existingPodNode, -1)
-
-	// For every hard pod affinity term of <existingPod>, if <pod> matches the term,
-	// increment <p.counts> for every node in the cluster with the same <term.TopologyKey>
-	// value as that of <existingPod>'s node by the constant <args.hardPodAffinityWeight>
-	if pl.args.HardPodAffinityWeight > 0 && len(existingPodNode.Labels) != 0 {
-		for _, t := range existingPod.RequiredAffinityTerms {
-			topoScore.processTerm(&t, pl.args.HardPodAffinityWeight, incomingPod, state.namespaceLabels, existingPodNode, 1)
-		}
-	}
-
-	// For every soft pod affinity term of <existingPod>, if <pod> matches the term,
-	// increment <p.counts> for every node in the cluster with the same <term.TopologyKey>
-	// value as that of <existingPod>'s node by the term's weight.
-	topoScore.processTerms(existingPod.PreferredAffinityTerms, incomingPod, state.namespaceLabels, existingPodNode, 1)
-
-	// For every soft pod anti-affinity term of <existingPod>, if <pod> matches the term,
-	// decrement <p.counts> for every node in the cluster with the same <term.TopologyKey>
-	// value as that of <existingPod>'s node by the term's weight.
-	topoScore.processTerms(existingPod.PreferredAntiAffinityTerms, incomingPod, state.namespaceLabels, existingPodNode, -1)
-}
-
-// PreScore builds and writes cycle state used by Score and NormalizeScore.
-func (pl *InterPodAffinity) PreScore( - pCtx context.Context, - cycleState *framework.CycleState, - pod *v1.Pod, - nodes []*framework.NodeInfo, -) *framework.Status { - - if pl.sharedLister == nil { - return framework.NewStatus(framework.Error, "empty shared lister in InterPodAffinity PreScore") - } - - affinity := pod.Spec.Affinity - hasPreferredAffinityConstraints := affinity != nil && affinity.PodAffinity != nil && len(affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0 - hasPreferredAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil && len(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0 - hasConstraints := hasPreferredAffinityConstraints || hasPreferredAntiAffinityConstraints - - // Optionally ignore calculating preferences of existing pods' affinity rules - // if the incoming pod has no inter-pod affinities. - if pl.args.IgnorePreferredTermsOfExistingPods && !hasConstraints { - return framework.NewStatus(framework.Skip) - } - - // Unless the pod being scheduled has preferred affinity terms, we only - // need to process nodes hosting pods with affinity. - var allNodes []*framework.NodeInfo - var err error - if hasConstraints { - allNodes, err = pl.sharedLister.NodeInfos().List() - if err != nil { - return framework.AsStatus(fmt.Errorf("failed to get all nodes from shared lister: %w", err)) - } - } else { - allNodes, err = pl.sharedLister.NodeInfos().HavePodsWithAffinityList() - if err != nil { - return framework.AsStatus(fmt.Errorf("failed to get pods with affinity list: %w", err)) - } - } - - state := &preScoreState{ - topologyScore: make(map[string]map[string]int64), - } - - if state.podInfo, err = framework.NewPodInfo(pod); err != nil { - // Ideally we never reach here, because errors will be caught by PreFilter - return framework.AsStatus(fmt.Errorf("failed to parse pod: %w", err)) - } - - for i := range state.podInfo.PreferredAffinityTerms { - if err := pl.mergeAffinityTermNamespacesIfNotEmpty(&state.podInfo.PreferredAffinityTerms[i].AffinityTerm); err != nil { - return framework.AsStatus(fmt.Errorf("updating PreferredAffinityTerms: %w", err)) - } - } - for i := range state.podInfo.PreferredAntiAffinityTerms { - if err := pl.mergeAffinityTermNamespacesIfNotEmpty(&state.podInfo.PreferredAntiAffinityTerms[i].AffinityTerm); err != nil { - return framework.AsStatus(fmt.Errorf("updating PreferredAntiAffinityTerms: %w", err)) - } - } - logger := klog.FromContext(pCtx) - state.namespaceLabels = GetNamespaceLabelsSnapshot(logger, pod.Namespace, pl.nsLister) - - topoScores := make([]scoreMap, len(allNodes)) - index := int32(-1) - processNode := func(i int) { - nodeInfo := allNodes[i] - - // Unless the pod being scheduled has preferred affinity terms, we only - // need to process pods with affinity in the node. - podsToProcess := nodeInfo.PodsWithAffinity - if hasConstraints { - // We need to process all the pods. 
- podsToProcess = nodeInfo.Pods - } - - topoScore := make(scoreMap) - for _, existingPod := range podsToProcess { - pl.processExistingPod(state, existingPod, nodeInfo, pod, topoScore) - } - if len(topoScore) > 0 { - topoScores[atomic.AddInt32(&index, 1)] = topoScore - } - } - pl.parallelizer.Until(pCtx, len(allNodes), processNode, pl.Name()) - - if index == -1 { - return framework.NewStatus(framework.Skip) - } - - for i := 0; i <= int(index); i++ { - state.topologyScore.append(topoScores[i]) - } - - cycleState.Write(preScoreStateKey, state) - return nil -} - -func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { - c, err := cycleState.Read(preScoreStateKey) - if err != nil { - return nil, fmt.Errorf("failed to read %q from cycleState: %w", preScoreStateKey, err) - } - - s, ok := c.(*preScoreState) - if !ok { - return nil, fmt.Errorf("%+v convert to interpodaffinity.preScoreState error", c) - } - return s, nil -} - -// Score invoked at the Score extension point. -// The "score" returned in this function is the sum of weights got from cycleState which have its topologyKey matching with the node's labels. -// it is normalized later. -// Note: the returned "score" is positive for pod-affinity, and negative for pod-antiaffinity. -func (pl *InterPodAffinity) Score(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - node := nodeInfo.Node() - - s, err := getPreScoreState(cycleState) - if err != nil { - return 0, framework.AsStatus(err) - } - var score int64 - for tpKey, tpValues := range s.topologyScore { - if v, exist := node.Labels[tpKey]; exist { - score += tpValues[v] - } - } - - return score, nil -} - -// NormalizeScore normalizes the score for each filteredNode. -func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { - s, err := getPreScoreState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - if len(s.topologyScore) == 0 { - return nil - } - - var minCount int64 = math.MaxInt64 - var maxCount int64 = math.MinInt64 - for i := range scores { - score := scores[i].Score - if score > maxCount { - maxCount = score - } - if score < minCount { - minCount = score - } - } - - maxMinDiff := maxCount - minCount - for i := range scores { - fScore := float64(0) - if maxMinDiff > 0 { - fScore = float64(framework.MaxNodeScore) * (float64(scores[i].Score-minCount) / float64(maxMinDiff)) - } - - scores[i].Score = int64(fScore) - } - - return nil -} - -// ScoreExtensions of the Score plugin. -func (pl *InterPodAffinity) ScoreExtensions() framework.ScoreExtensions { - return pl -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/names/names.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/names/names.go deleted file mode 100644 index a3cf1342338..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/names/names.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package names - -const ( - PrioritySort = "PrioritySort" - DefaultBinder = "DefaultBinder" - DefaultPreemption = "DefaultPreemption" - DynamicResources = "DynamicResources" - ImageLocality = "ImageLocality" - InterPodAffinity = "InterPodAffinity" - NodeAffinity = "NodeAffinity" - NodeName = "NodeName" - NodePorts = "NodePorts" - NodeResourcesBalancedAllocation = "NodeResourcesBalancedAllocation" - NodeResourcesFit = "NodeResourcesFit" - NodeUnschedulable = "NodeUnschedulable" - NodeVolumeLimits = "NodeVolumeLimits" - PodTopologySpread = "PodTopologySpread" - SchedulingGates = "SchedulingGates" - TaintToleration = "TaintToleration" - VolumeBinding = "VolumeBinding" - VolumeRestrictions = "VolumeRestrictions" - VolumeZone = "VolumeZone" -) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go deleted file mode 100644 index f63efb61616..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go +++ /dev/null @@ -1,364 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodeaffinity - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// NodeAffinity is a plugin that checks if a pod node selector matches the node label. -type NodeAffinity struct { - handle framework.Handle - addedNodeSelector *nodeaffinity.NodeSelector - addedPrefSchedTerms *nodeaffinity.PreferredSchedulingTerms - enableSchedulingQueueHint bool -} - -var _ framework.PreFilterPlugin = &NodeAffinity{} -var _ framework.FilterPlugin = &NodeAffinity{} -var _ framework.PreScorePlugin = &NodeAffinity{} -var _ framework.ScorePlugin = &NodeAffinity{} -var _ framework.EnqueueExtensions = &NodeAffinity{} - -const ( - // Name is the name of the plugin used in the plugin registry and configurations. - Name = names.NodeAffinity - - // preScoreStateKey is the key in CycleState to NodeAffinity pre-computed data for Scoring. - preScoreStateKey = "PreScore" + Name - - // preFilterStateKey is the key in CycleState to NodeAffinity pre-compute data for Filtering. - preFilterStateKey = "PreFilter" + Name - - // ErrReasonPod is the reason for Pod's node affinity/selector not matching. 
- ErrReasonPod = "node(s) didn't match Pod's node affinity/selector" - - // errReasonEnforced is the reason for added node affinity not matching. - errReasonEnforced = "node(s) didn't match scheduler-enforced node affinity" - - // errReasonConflict is the reason for pod's conflicting affinity rules. - errReasonConflict = "pod affinity terms conflict" -) - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *NodeAffinity) Name() string { - return Name -} - -type preFilterState struct { - requiredNodeSelectorAndAffinity nodeaffinity.RequiredNodeAffinity -} - -// Clone just returns the same state because it is not affected by pod additions or deletions. -func (s *preFilterState) Clone() framework.StateData { - return s -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *NodeAffinity) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - // A note about UpdateNodeTaint event: - // Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin. - // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add. - // See: https://github.com/kubernetes/kubernetes/issues/109437 - nodeActionType := framework.Add | framework.UpdateNodeLabel | framework.UpdateNodeTaint - if pl.enableSchedulingQueueHint { - // preCheck is not used when QHint is enabled, and hence we can use UpdateNodeLabel instead of Update. - nodeActionType = framework.Add | framework.UpdateNodeLabel - } - - return []framework.ClusterEventWithHint{ - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}, QueueingHintFn: pl.isSchedulableAfterNodeChange}, - }, nil -} - -// isSchedulableAfterNodeChange is invoked whenever a node changed. It checks whether -// that change made a previously unschedulable pod schedulable. -func (pl *NodeAffinity) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - if pl.addedNodeSelector != nil && !pl.addedNodeSelector.Match(modifiedNode) { - logger.V(4).Info("added or modified node didn't match scheduler-enforced node affinity and this event won't make the Pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil - } - - requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod) - isMatched, err := requiredNodeAffinity.Match(modifiedNode) - if err != nil { - return framework.Queue, err - } - if !isMatched { - logger.V(5).Info("node was created or updated, but the pod's NodeAffinity doesn't match", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil - } - // Since the node was added and it matches the pod's affinity criteria, we can unblock it. - if originalNode == nil { - logger.V(5).Info("node was created, and matches with the pod's NodeAffinity", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - // At this point we know the operation is update so we can narrow down the criteria to unmatch -> match changes only - // (necessary affinity label was added to the node in this case). 
- wasMatched, err := requiredNodeAffinity.Match(originalNode) - if err != nil { - return framework.Queue, err - } - if wasMatched { - logger.V(5).Info("node updated, but the pod's NodeAffinity hasn't changed", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil - } - logger.V(5).Info("node was updated and the pod's NodeAffinity changed to matched", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil -} - -// PreFilter builds and writes cycle state used by Filter. -func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - affinity := pod.Spec.Affinity - noNodeAffinity := (affinity == nil || - affinity.NodeAffinity == nil || - affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil) - if noNodeAffinity && pl.addedNodeSelector == nil && pod.Spec.NodeSelector == nil { - // NodeAffinity Filter has nothing to do with the Pod. - return nil, framework.NewStatus(framework.Skip) - } - - state := &preFilterState{requiredNodeSelectorAndAffinity: nodeaffinity.GetRequiredNodeAffinity(pod)} - cycleState.Write(preFilterStateKey, state) - - if noNodeAffinity || len(affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { - return nil, nil - } - - // Check if there is affinity to a specific node and return it. - terms := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms - var nodeNames sets.Set[string] - for _, t := range terms { - var termNodeNames sets.Set[string] - for _, r := range t.MatchFields { - if r.Key == metav1.ObjectNameField && r.Operator == v1.NodeSelectorOpIn { - // The requirements represent ANDed constraints, and so we need to - // find the intersection of nodes. - s := sets.New(r.Values...) - if termNodeNames == nil { - termNodeNames = s - } else { - termNodeNames = termNodeNames.Intersection(s) - } - } - } - if termNodeNames == nil { - // If this term has no node.Name field affinity, - // then all nodes are eligible because the terms are ORed. - return nil, nil - } - nodeNames = nodeNames.Union(termNodeNames) - } - // If nodeNames is not nil, but length is 0, it means each term have conflicting affinity to node.Name; - // therefore, pod will not match any node. - if nodeNames != nil && len(nodeNames) == 0 { - return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, errReasonConflict) - } else if len(nodeNames) > 0 { - return &framework.PreFilterResult{NodeNames: nodeNames}, nil - } - return nil, nil - -} - -// PreFilterExtensions not necessary for this plugin as state doesn't depend on pod additions or deletions. -func (pl *NodeAffinity) PreFilterExtensions() framework.PreFilterExtensions { - return nil -} - -// Filter checks if the Node matches the Pod .spec.affinity.nodeAffinity and -// the plugin's added affinity. -func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - node := nodeInfo.Node() - - if pl.addedNodeSelector != nil && !pl.addedNodeSelector.Match(node) { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, errReasonEnforced) - } - - s, err := getPreFilterState(state) - if err != nil { - // Fallback to calculate requiredNodeSelector and requiredNodeAffinity - // here when PreFilter is disabled. 
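
// A standalone sketch (illustrative types, not the vendored plugin code) of
// the node-name narrowing that PreFilter performs above: MatchFields
// requirements inside one term are ANDed (set intersection), while the terms
// themselves are ORed (set union). A nil result means "all nodes are
// eligible"; an empty but non-nil result signals conflicting terms.
package main

import "fmt"

// intersect returns the intersection of two node-name sets.
func intersect(a, b map[string]bool) map[string]bool {
	out := map[string]bool{}
	for n := range a {
		if b[n] {
			out[n] = true
		}
	}
	return out
}

// eligibleNodeNames takes, per term, the name sets extracted from that
// term's metadata.name requirements (an assumed pre-processing step).
func eligibleNodeNames(terms [][]map[string]bool) map[string]bool {
	var union map[string]bool
	for _, reqs := range terms {
		var names map[string]bool
		for _, s := range reqs {
			if names == nil {
				names = s
			} else {
				names = intersect(names, s)
			}
		}
		if names == nil {
			return nil // a term without a name requirement matches every node
		}
		if union == nil {
			union = map[string]bool{}
		}
		for n := range names {
			union[n] = true
		}
	}
	return union
}

func main() {
	terms := [][]map[string]bool{
		{{"node-a": true, "node-b": true}, {"node-b": true}}, // ANDed within the term
	}
	fmt.Println(eligibleNodeNames(terms)) // map[node-b:true]
}
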
- s = &preFilterState{requiredNodeSelectorAndAffinity: nodeaffinity.GetRequiredNodeAffinity(pod)} - } - - // Ignore parsing errors for backwards compatibility. - match, _ := s.requiredNodeSelectorAndAffinity.Match(node) - if !match { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonPod) - } - - return nil -} - -// preScoreState computed at PreScore and used at Score. -type preScoreState struct { - preferredNodeAffinity *nodeaffinity.PreferredSchedulingTerms -} - -// Clone implements the mandatory Clone interface. We don't really copy the data since -// there is no need for that. -func (s *preScoreState) Clone() framework.StateData { - return s -} - -// PreScore builds and writes cycle state used by Score and NormalizeScore. -func (pl *NodeAffinity) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { - preferredNodeAffinity, err := getPodPreferredNodeAffinity(pod) - if err != nil { - return framework.AsStatus(err) - } - if preferredNodeAffinity == nil && pl.addedPrefSchedTerms == nil { - // NodeAffinity Score has nothing to do with the Pod. - return framework.NewStatus(framework.Skip) - } - state := &preScoreState{ - preferredNodeAffinity: preferredNodeAffinity, - } - cycleState.Write(preScoreStateKey, state) - return nil -} - -// Score returns the sum of the weights of the terms that match the Node. -// Terms came from the Pod .spec.affinity.nodeAffinity and from the plugin's -// default affinity. -func (pl *NodeAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - node := nodeInfo.Node() - - var count int64 - if pl.addedPrefSchedTerms != nil { - count += pl.addedPrefSchedTerms.Score(node) - } - - s, err := getPreScoreState(state) - if err != nil { - // Fallback to calculate preferredNodeAffinity here when PreScore is disabled. - preferredNodeAffinity, err := getPodPreferredNodeAffinity(pod) - if err != nil { - return 0, framework.AsStatus(err) - } - s = &preScoreState{ - preferredNodeAffinity: preferredNodeAffinity, - } - } - - if s.preferredNodeAffinity != nil { - count += s.preferredNodeAffinity.Score(node) - } - - return count, nil -} - -// NormalizeScore invoked after scoring all nodes. -func (pl *NodeAffinity) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { - return helper.DefaultNormalizeScore(framework.MaxNodeScore, false, scores) -} - -// ScoreExtensions of the Score plugin. -func (pl *NodeAffinity) ScoreExtensions() framework.ScoreExtensions { - return pl -} - -// New initializes a new plugin and returns it. -func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { - args, err := getArgs(plArgs) - if err != nil { - return nil, err - } - pl := &NodeAffinity{ - handle: h, - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - } - if args.AddedAffinity != nil { - if ns := args.AddedAffinity.RequiredDuringSchedulingIgnoredDuringExecution; ns != nil { - pl.addedNodeSelector, err = nodeaffinity.NewNodeSelector(ns) - if err != nil { - return nil, fmt.Errorf("parsing addedAffinity.requiredDuringSchedulingIgnoredDuringExecution: %w", err) - } - } - // TODO: parse requiredDuringSchedulingRequiredDuringExecution when it gets added to the API. 
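
// A minimal sketch of how preferred-term scoring accumulates, in the spirit
// of the Score method above: each matching preferred term contributes its
// weight, and the sums are normalized later. The prefTerm type and the
// equality-only label matching are simplifications, not the real
// PreferredSchedulingTerms implementation.
package main

import "fmt"

type prefTerm struct {
	weight int64
	labels map[string]string // labels a node must carry for the term to match
}

// scoreNode sums the weights of all terms whose labels the node satisfies.
func scoreNode(nodeLabels map[string]string, terms []prefTerm) int64 {
	var count int64
	for _, t := range terms {
		matched := true
		for k, v := range t.labels {
			if nodeLabels[k] != v {
				matched = false
				break
			}
		}
		if matched {
			count += t.weight
		}
	}
	return count
}

func main() {
	node := map[string]string{"zone": "eu-1", "disk": "ssd"}
	terms := []prefTerm{
		{weight: 80, labels: map[string]string{"zone": "eu-1"}},
		{weight: 20, labels: map[string]string{"disk": "hdd"}},
	}
	fmt.Println(scoreNode(node, terms)) // 80: only the zone term matches
}
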
- if terms := args.AddedAffinity.PreferredDuringSchedulingIgnoredDuringExecution; len(terms) != 0 { - pl.addedPrefSchedTerms, err = nodeaffinity.NewPreferredSchedulingTerms(terms) - if err != nil { - return nil, fmt.Errorf("parsing addedAffinity.preferredDuringSchedulingIgnoredDuringExecution: %w", err) - } - } - } - return pl, nil -} - -func getArgs(obj runtime.Object) (config.NodeAffinityArgs, error) { - ptr, ok := obj.(*config.NodeAffinityArgs) - if !ok { - return config.NodeAffinityArgs{}, fmt.Errorf("args are not of type NodeAffinityArgs, got %T", obj) - } - return *ptr, validation.ValidateNodeAffinityArgs(nil, ptr) -} - -func getPodPreferredNodeAffinity(pod *v1.Pod) (*nodeaffinity.PreferredSchedulingTerms, error) { - affinity := pod.Spec.Affinity - if affinity != nil && affinity.NodeAffinity != nil && affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { - return nodeaffinity.NewPreferredSchedulingTerms(affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution) - } - return nil, nil -} - -func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { - c, err := cycleState.Read(preScoreStateKey) - if err != nil { - return nil, fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err) - } - - s, ok := c.(*preScoreState) - if !ok { - return nil, fmt.Errorf("invalid PreScore state, got type %T", c) - } - return s, nil -} - -func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { - c, err := cycleState.Read(preFilterStateKey) - if err != nil { - return nil, fmt.Errorf("reading %q from cycleState: %v", preFilterStateKey, err) - } - - s, ok := c.(*preFilterState) - if !ok { - return nil, fmt.Errorf("invalid PreFilter state, got type %T", c) - } - return s, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go deleted file mode 100644 index 29605a464c3..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodename - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" -) - -// NodeName is a plugin that checks if a pod spec node name matches the current node. -type NodeName struct { - enableSchedulingQueueHint bool -} - -var _ framework.FilterPlugin = &NodeName{} -var _ framework.EnqueueExtensions = &NodeName{} - -const ( - // Name is the name of the plugin used in the plugin registry and configurations. - Name = names.NodeName - - // ErrReason returned when node name doesn't match. 
- ErrReason = "node(s) didn't match the requested node name" -) - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *NodeName) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - // A note about UpdateNodeTaint/UpdateNodeLabel event: - // Ideally, it's supposed to register only Add because any Node update event will never change the result from this plugin. - // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add. - // See: https://github.com/kubernetes/kubernetes/issues/109437 - nodeActionType := framework.Add | framework.UpdateNodeTaint | framework.UpdateNodeLabel - if pl.enableSchedulingQueueHint { - // preCheck is not used when QHint is enabled, and hence Update event isn't necessary. - nodeActionType = framework.Add - } - - return []framework.ClusterEventWithHint{ - // We don't need the QueueingHintFn here because the scheduling of Pods will be always retried with backoff when this Event happens. - // (the same as Queue) - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}}, - }, nil -} - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *NodeName) Name() string { - return Name -} - -// Filter invoked at the filter extension point. -func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - - if !Fits(pod, nodeInfo) { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason) - } - return nil -} - -// Fits actually checks if the pod fits the node. -func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { - return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name -} - -// New initializes a new plugin and returns it. -func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) { - return &NodeName{ - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - }, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go deleted file mode 100644 index 21835f6c122..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go +++ /dev/null @@ -1,215 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodeports - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// NodePorts is a plugin that checks if a node has free ports for the requested pod ports. 
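
// The entire NodeName filter reduces to the predicate in Fits above; this
// standalone sketch shows the two-clause rule: an empty spec.nodeName
// matches any node, otherwise the names must be equal.
package main

import "fmt"

func fits(podNodeName, nodeName string) bool {
	return len(podNodeName) == 0 || podNodeName == nodeName
}

func main() {
	fmt.Println(fits("", "worker-1"))         // true: pod requested no node
	fmt.Println(fits("worker-1", "worker-1")) // true: exact match
	fmt.Println(fits("worker-1", "worker-2")) // false
}
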
-type NodePorts struct { - enableSchedulingQueueHint bool -} - -var _ framework.PreFilterPlugin = &NodePorts{} -var _ framework.FilterPlugin = &NodePorts{} -var _ framework.EnqueueExtensions = &NodePorts{} - -const ( - // Name is the name of the plugin used in the plugin registry and configurations. - Name = names.NodePorts - - // preFilterStateKey is the key in CycleState to NodePorts pre-computed data. - // Using the name of the plugin will likely help us avoid collisions with other plugins. - preFilterStateKey = "PreFilter" + Name - - // ErrReason when node ports aren't available. - ErrReason = "node(s) didn't have free ports for the requested pod ports" -) - -type preFilterState []*v1.ContainerPort - -// Clone the prefilter state. -func (s preFilterState) Clone() framework.StateData { - // The state is not impacted by adding/removing existing pods, hence we don't need to make a deep copy. - return s -} - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *NodePorts) Name() string { - return Name -} - -// getContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair -// will be in the result; but it does not resolve port conflict. -func getContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort { - ports := []*v1.ContainerPort{} - for _, pod := range pods { - for j := range pod.Spec.Containers { - container := &pod.Spec.Containers[j] - for k := range container.Ports { - // Only return ports with a host port specified. - if container.Ports[k].HostPort <= 0 { - continue - } - ports = append(ports, &container.Ports[k]) - } - } - } - return ports -} - -// PreFilter invoked at the prefilter extension point. -func (pl *NodePorts) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - s := getContainerPorts(pod) - // Skip if a pod has no ports. - if len(s) == 0 { - return nil, framework.NewStatus(framework.Skip) - } - cycleState.Write(preFilterStateKey, preFilterState(s)) - return nil, nil -} - -// PreFilterExtensions do not exist for this plugin. -func (pl *NodePorts) PreFilterExtensions() framework.PreFilterExtensions { - return nil -} - -func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error) { - c, err := cycleState.Read(preFilterStateKey) - if err != nil { - // preFilterState doesn't exist, likely PreFilter wasn't invoked. - return nil, fmt.Errorf("reading %q from cycleState: %w", preFilterStateKey, err) - } - - s, ok := c.(preFilterState) - if !ok { - return nil, fmt.Errorf("%+v convert to nodeports.preFilterState error", c) - } - return s, nil -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *NodePorts) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - // A note about UpdateNodeTaint/UpdateNodeLabel event: - // Ideally, it's supposed to register only Add because NodeUpdated event never means to have any free ports for the Pod. - // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add. - // See: https://github.com/kubernetes/kubernetes/issues/109437 - nodeActionType := framework.Add | framework.UpdateNodeTaint | framework.UpdateNodeLabel - if pl.enableSchedulingQueueHint { - // preCheck is not used when QHint is enabled, and hence Update event isn't necessary. 
- nodeActionType = framework.Add - } - - return []framework.ClusterEventWithHint{ - // Due to immutable fields `spec.containers[*].ports`, pod update events are ignored. - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}, QueueingHintFn: pl.isSchedulableAfterPodDeleted}, - // We don't need the QueueingHintFn here because the scheduling of Pods will be always retried with backoff when this Event happens. - // (the same as Queue) - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}}, - }, nil -} - -// isSchedulableAfterPodDeleted is invoked whenever a pod deleted. It checks whether -// that change made a previously unschedulable pod schedulable. -func (pl *NodePorts) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - deletedPod, _, err := util.As[*v1.Pod](oldObj, nil) - if err != nil { - return framework.Queue, err - } - - // If the deleted pod is unscheduled, it doesn't make the target pod schedulable. - if deletedPod.Spec.NodeName == "" && deletedPod.Status.NominatedNodeName == "" { - logger.V(4).Info("the deleted pod is unscheduled and it doesn't make the target pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod)) - return framework.QueueSkip, nil - } - - // Get the used host ports of the deleted pod. - usedPorts := make(framework.HostPortInfo) - for _, container := range deletedPod.Spec.Containers { - for _, podPort := range container.Ports { - if podPort.HostPort > 0 { - usedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) - } - } - } - - // If the deleted pod doesn't use any host ports, it doesn't make the target pod schedulable. - if len(usedPorts) == 0 { - return framework.QueueSkip, nil - } - - // Construct a fake NodeInfo that only has the deleted Pod. - // If we can schedule `pod` to this fake node, it means that `pod` and the deleted pod don't have any common port(s). - // So, deleting that pod couldn't make `pod` schedulable. - nodeInfo := framework.NodeInfo{UsedPorts: usedPorts} - if Fits(pod, &nodeInfo) { - logger.V(4).Info("the deleted pod and the target pod don't have any common port(s), returning QueueSkip as deleting this Pod won't make the Pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod)) - return framework.QueueSkip, nil - } - - logger.V(4).Info("the deleted pod and the target pod have any common port(s), returning Queue as deleting this Pod may make the Pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod)) - return framework.Queue, nil -} - -// Filter invoked at the filter extension point. -func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - wantPorts, err := getPreFilterState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - - fits := fitsPorts(wantPorts, nodeInfo) - if !fits { - return framework.NewStatus(framework.Unschedulable, ErrReason) - } - - return nil -} - -// Fits checks if the pod fits the node. 
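
// A simplified, self-contained sketch of the host-port conflict check that
// backs Fits/CheckConflict above. Ports are keyed by (ip, protocol, port);
// "0.0.0.0" is treated as overlapping every concrete address, which mirrors
// the wildcard handling in the real HostPortInfo, though the exact upstream
// semantics live in that type, not here.
package main

import "fmt"

type portKey struct {
	ip       string
	protocol string
	port     int32
}

// conflicts reports whether a wanted host port collides with ports already
// in use on the node.
func conflicts(used map[portKey]bool, want portKey) bool {
	if used[want] {
		return true
	}
	for k := range used {
		if k.protocol == want.protocol && k.port == want.port &&
			(k.ip == "0.0.0.0" || want.ip == "0.0.0.0") {
			return true
		}
	}
	return false
}

func main() {
	used := map[portKey]bool{{ip: "0.0.0.0", protocol: "TCP", port: 8080}: true}
	fmt.Println(conflicts(used, portKey{"10.0.0.5", "TCP", 8080})) // true: wildcard overlap
	fmt.Println(conflicts(used, portKey{"10.0.0.5", "TCP", 9090})) // false
}
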
-func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { - return fitsPorts(getContainerPorts(pod), nodeInfo) -} - -func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *framework.NodeInfo) bool { - // try to see whether existingPorts and wantPorts will conflict or not - existingPorts := nodeInfo.UsedPorts - for _, cp := range wantPorts { - if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) { - return false - } - } - return true -} - -// New initializes a new plugin and returns it. -func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) { - return &NodePorts{ - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - }, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go deleted file mode 100644 index 30c31f14e8f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderesources - -import ( - "context" - "fmt" - "math" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" -) - -// BalancedAllocation is a score plugin that calculates the difference between the cpu and memory fraction -// of capacity, and prioritizes the host based on how close the two metrics are to each other. -type BalancedAllocation struct { - handle framework.Handle - resourceAllocationScorer -} - -var _ framework.PreScorePlugin = &BalancedAllocation{} -var _ framework.ScorePlugin = &BalancedAllocation{} - -// BalancedAllocationName is the name of the plugin used in the plugin registry and configurations. -const ( - BalancedAllocationName = names.NodeResourcesBalancedAllocation - - // balancedAllocationPreScoreStateKey is the key in CycleState to NodeResourcesBalancedAllocation pre-computed data for Scoring. - balancedAllocationPreScoreStateKey = "PreScore" + BalancedAllocationName -) - -// balancedAllocationPreScoreState computed at PreScore and used at Score. -type balancedAllocationPreScoreState struct { - // podRequests have the same order of the resources defined in NodeResourcesFitArgs.Resources, - // same for other place we store a list like that. - podRequests []int64 -} - -// Clone implements the mandatory Clone interface. We don't really copy the data since -// there is no need for that. -func (s *balancedAllocationPreScoreState) Clone() framework.StateData { - return s -} - -// PreScore calculates incoming pod's resource requests and writes them to the cycle state used. 
-func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { - podRequests := ba.calculatePodResourceRequestList(pod, ba.resources) - if ba.isBestEffortPod(podRequests) { - // Skip BalancedAllocation scoring for best-effort pods to - // prevent a large number of pods from being scheduled to the same node. - // See https://github.com/kubernetes/kubernetes/issues/129138 for details. - return framework.NewStatus(framework.Skip) - } - state := &balancedAllocationPreScoreState{ - podRequests: podRequests, - } - cycleState.Write(balancedAllocationPreScoreStateKey, state) - return nil -} - -func getBalancedAllocationPreScoreState(cycleState *framework.CycleState) (*balancedAllocationPreScoreState, error) { - c, err := cycleState.Read(balancedAllocationPreScoreStateKey) - if err != nil { - return nil, fmt.Errorf("reading %q from cycleState: %w", balancedAllocationPreScoreStateKey, err) - } - - s, ok := c.(*balancedAllocationPreScoreState) - if !ok { - return nil, fmt.Errorf("invalid PreScore state, got type %T", c) - } - return s, nil -} - -// Name returns name of the plugin. It is used in logs, etc. -func (ba *BalancedAllocation) Name() string { - return BalancedAllocationName -} - -// Score invoked at the score extension point. -func (ba *BalancedAllocation) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - s, err := getBalancedAllocationPreScoreState(state) - if err != nil { - s = &balancedAllocationPreScoreState{podRequests: ba.calculatePodResourceRequestList(pod, ba.resources)} - if ba.isBestEffortPod(s.podRequests) { - return 0, nil - } - } - - // ba.score favors nodes with balanced resource usage rate. - // It calculates the standard deviation for those resources and prioritizes the node based on how close the usage of those resources is to each other. - // Detail: score = (1 - std) * MaxNodeScore, where std is calculated by the root square of Σ((fraction(i)-mean)^2)/len(resources) - // The algorithm is partly inspired by: - // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization" - return ba.score(ctx, pod, nodeInfo, s.podRequests) -} - -// ScoreExtensions of the Score plugin. -func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions { - return nil -} - -// NewBalancedAllocation initializes a new plugin and returns it. 
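
// A standalone sketch of the std-based formula described in the Score
// comment above: score = (1 - std) * MaxNodeScore over the per-resource
// usage fractions. Not the vendored scorer; inputs are plain int64 slices.
package main

import (
	"fmt"
	"math"
)

const maxNodeScore = 100

// balancedScore is higher the closer the per-resource usage fractions are
// to each other (smaller standard deviation).
func balancedScore(requested, allocatable []int64) int64 {
	var fractions []float64
	var total float64
	for i := range requested {
		if allocatable[i] == 0 {
			continue
		}
		f := math.Min(float64(requested[i])/float64(allocatable[i]), 1)
		fractions = append(fractions, f)
		total += f
	}
	std := 0.0
	switch {
	case len(fractions) == 2: // the common cpu+memory case has a closed form
		std = math.Abs((fractions[0] - fractions[1]) / 2)
	case len(fractions) > 2:
		mean := total / float64(len(fractions))
		var sum float64
		for _, f := range fractions {
			sum += (f - mean) * (f - mean)
		}
		std = math.Sqrt(sum / float64(len(fractions)))
	}
	return int64((1 - std) * maxNodeScore)
}

func main() {
	// cpu at 50%, memory at 50%: perfectly balanced.
	fmt.Println(balancedScore([]int64{500, 4}, []int64{1000, 8})) // 100
	// cpu at 90%, memory at 10%: heavily skewed.
	fmt.Println(balancedScore([]int64{900, 1}, []int64{1000, 10})) // 60
}
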
-func NewBalancedAllocation(_ context.Context, baArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { - args, ok := baArgs.(*config.NodeResourcesBalancedAllocationArgs) - if !ok { - return nil, fmt.Errorf("want args to be of type NodeResourcesBalancedAllocationArgs, got %T", baArgs) - } - - if err := validation.ValidateNodeResourcesBalancedAllocationArgs(nil, args); err != nil { - return nil, err - } - - return &BalancedAllocation{ - handle: h, - resourceAllocationScorer: resourceAllocationScorer{ - Name: BalancedAllocationName, - enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling, - enablePodLevelResources: fts.EnablePodLevelResources, - scorer: balancedResourceScorer, - useRequested: true, - resources: args.Resources, - }, - }, nil -} - -func balancedResourceScorer(requested, allocable []int64) int64 { - var resourceToFractions []float64 - var totalFraction float64 - for i := range requested { - if allocable[i] == 0 { - continue - } - fraction := float64(requested[i]) / float64(allocable[i]) - if fraction > 1 { - fraction = 1 - } - totalFraction += fraction - resourceToFractions = append(resourceToFractions, fraction) - } - - std := 0.0 - - // For most cases, resources are limited to cpu and memory, the std could be simplified to std := (fraction1-fraction2)/2 - // len(fractions) > 2: calculate std based on the well-known formula - root square of Σ((fraction(i)-mean)^2)/len(fractions) - // Otherwise, set the std to zero is enough. - if len(resourceToFractions) == 2 { - std = math.Abs((resourceToFractions[0] - resourceToFractions[1]) / 2) - } else if len(resourceToFractions) > 2 { - mean := totalFraction / float64(len(resourceToFractions)) - var sum float64 - for _, fraction := range resourceToFractions { - sum = sum + (fraction-mean)*(fraction-mean) - } - std = math.Sqrt(sum / float64(len(resourceToFractions))) - } - - // STD (standard deviation) is always a positive value. 1-deviation lets the score to be higher for node which has least deviation and - // multiplying it with `MaxNodeScore` provides the scaling factor needed. - return int64((1 - std) * float64(framework.MaxNodeScore)) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go deleted file mode 100644 index d94554ad6d7..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go +++ /dev/null @@ -1,591 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package noderesources - -import ( - "context" - "fmt" - "strings" - - "github.com/google/go-cmp/cmp" //nolint:depguard - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/component-helpers/resource" - "k8s.io/klog/v2" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" -) - -var _ framework.PreFilterPlugin = &Fit{} -var _ framework.FilterPlugin = &Fit{} -var _ framework.EnqueueExtensions = &Fit{} -var _ framework.PreScorePlugin = &Fit{} -var _ framework.ScorePlugin = &Fit{} - -const ( - // Name is the name of the plugin used in the plugin registry and configurations. - Name = names.NodeResourcesFit - - // preFilterStateKey is the key in CycleState to NodeResourcesFit pre-computed data. - // Using the name of the plugin will likely help us avoid collisions with other plugins. - preFilterStateKey = "PreFilter" + Name - - // preScoreStateKey is the key in CycleState to NodeResourcesFit pre-computed data for Scoring. - preScoreStateKey = "PreScore" + Name -) - -// nodeResourceStrategyTypeMap maps strategy to scorer implementation -var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{ - config.LeastAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer { - resources := args.ScoringStrategy.Resources - return &resourceAllocationScorer{ - Name: string(config.LeastAllocated), - scorer: leastResourceScorer(resources), - resources: resources, - } - }, - config.MostAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer { - resources := args.ScoringStrategy.Resources - return &resourceAllocationScorer{ - Name: string(config.MostAllocated), - scorer: mostResourceScorer(resources), - resources: resources, - } - }, - config.RequestedToCapacityRatio: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer { - resources := args.ScoringStrategy.Resources - return &resourceAllocationScorer{ - Name: string(config.RequestedToCapacityRatio), - scorer: requestedToCapacityRatioScorer(resources, args.ScoringStrategy.RequestedToCapacityRatio.Shape), - resources: resources, - } - }, -} - -// Fit is a plugin that checks if a node has sufficient resources. -type Fit struct { - ignoredResources sets.Set[string] - ignoredResourceGroups sets.Set[string] - enableInPlacePodVerticalScaling bool - enableSidecarContainers bool - enableSchedulingQueueHint bool - enablePodLevelResources bool - handle framework.Handle - resourceAllocationScorer -} - -// ScoreExtensions of the Score plugin. -func (f *Fit) ScoreExtensions() framework.ScoreExtensions { - return nil -} - -// preFilterState computed at PreFilter and used at Filter. -type preFilterState struct { - framework.Resource -} - -// Clone the prefilter state. -func (s *preFilterState) Clone() framework.StateData { - return s -} - -// preScoreState computed at PreScore and used at Score. -type preScoreState struct { - // podRequests have the same order as the resources defined in NodeResourcesBalancedAllocationArgs.Resources, - // same for other place we store a list like that. - podRequests []int64 -} - -// Clone implements the mandatory Clone interface. 
We don't really copy the data since -// there is no need for that. -func (s *preScoreState) Clone() framework.StateData { - return s -} - -// PreScore calculates incoming pod's resource requests and writes them to the cycle state used. -func (f *Fit) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { - state := &preScoreState{ - podRequests: f.calculatePodResourceRequestList(pod, f.resources), - } - cycleState.Write(preScoreStateKey, state) - return nil -} - -func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { - c, err := cycleState.Read(preScoreStateKey) - if err != nil { - return nil, fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err) - } - - s, ok := c.(*preScoreState) - if !ok { - return nil, fmt.Errorf("invalid PreScore state, got type %T", c) - } - return s, nil -} - -// Name returns name of the plugin. It is used in logs, etc. -func (f *Fit) Name() string { - return Name -} - -// NewFit initializes a new plugin and returns it. -func NewFit(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { - args, ok := plArgs.(*config.NodeResourcesFitArgs) - if !ok { - return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", plArgs) - } - if err := validation.ValidateNodeResourcesFitArgs(nil, args); err != nil { - return nil, err - } - - if args.ScoringStrategy == nil { - return nil, fmt.Errorf("scoring strategy not specified") - } - - strategy := args.ScoringStrategy.Type - scorePlugin, exists := nodeResourceStrategyTypeMap[strategy] - if !exists { - return nil, fmt.Errorf("scoring strategy %s is not supported", strategy) - } - - return &Fit{ - ignoredResources: sets.New(args.IgnoredResources...), - ignoredResourceGroups: sets.New(args.IgnoredResourceGroups...), - enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling, - enableSidecarContainers: fts.EnableSidecarContainers, - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - handle: h, - enablePodLevelResources: fts.EnablePodLevelResources, - resourceAllocationScorer: *scorePlugin(args), - }, nil -} - -type ResourceRequestsOptions struct { - EnablePodLevelResources bool -} - -// computePodResourceRequest returns a framework.Resource that covers the largest -// width in each resource dimension. Because init-containers run sequentially, we collect -// the max in each dimension iteratively. In contrast, we sum the resource vectors for -// regular containers since they run simultaneously. -// -// # The resources defined for Overhead should be added to the calculated Resource request sum -// -// Example: -// -// Pod: -// -// InitContainers -// IC1: -// CPU: 2 -// Memory: 1G -// IC2: -// CPU: 2 -// Memory: 3G -// Containers -// C1: -// CPU: 2 -// Memory: 1G -// C2: -// CPU: 1 -// Memory: 1G -// -// Result: CPU: 3, Memory: 3G -// TODO(ndixita): modify computePodResourceRequest to accept opts of type -// ResourceRequestOptions as the second parameter. -func computePodResourceRequest(pod *v1.Pod, opts ResourceRequestsOptions) *preFilterState { - // pod hasn't scheduled yet so we don't need to worry about InPlacePodVerticalScalingEnabled - reqs := resource.PodRequests(pod, resource.PodResourcesOptions{ - // SkipPodLevelResources is set to false when PodLevelResources feature is enabled. 
- SkipPodLevelResources: !opts.EnablePodLevelResources, - }) - result := &preFilterState{} - result.SetMaxResource(reqs) - return result -} - -// PreFilter invoked at the prefilter extension point. -func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - if !f.enableSidecarContainers && hasRestartableInitContainer(pod) { - // Scheduler will calculate resources usage for a Pod containing - // restartable init containers that will be equal or more than kubelet will - // require to run the Pod. So there will be no overbooking. However, to - // avoid the inconsistency in resource calculation between the scheduler - // and the older (before v1.28) kubelet, make the Pod unschedulable. - return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "Pod has a restartable init container and the SidecarContainers feature is disabled") - } - cycleState.Write(preFilterStateKey, computePodResourceRequest(pod, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources})) - return nil, nil -} - -// PreFilterExtensions returns prefilter extensions, pod add and remove. -func (f *Fit) PreFilterExtensions() framework.PreFilterExtensions { - return nil -} - -func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { - c, err := cycleState.Read(preFilterStateKey) - if err != nil { - // preFilterState doesn't exist, likely PreFilter wasn't invoked. - return nil, fmt.Errorf("error reading %q from cycleState: %w", preFilterStateKey, err) - } - - s, ok := c.(*preFilterState) - if !ok { - return nil, fmt.Errorf("%+v convert to NodeResourcesFit.preFilterState error", c) - } - return s, nil -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (f *Fit) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - podActionType := framework.Delete - if f.enableInPlacePodVerticalScaling { - // If InPlacePodVerticalScaling (KEP 1287) is enabled, then UpdatePodScaleDown event should be registered - // for this plugin since a Pod update may free up resources that make other Pods schedulable. - podActionType |= framework.UpdatePodScaleDown - } - - // A note about UpdateNodeTaint/UpdateNodeLabel event: - // Ideally, it's supposed to register only Add | UpdateNodeAllocatable because the only resource update could change the node resource fit plugin's result. - // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add. - // See: https://github.com/kubernetes/kubernetes/issues/109437 - nodeActionType := framework.Add | framework.UpdateNodeAllocatable | framework.UpdateNodeTaint | framework.UpdateNodeLabel - if f.enableSchedulingQueueHint { - // preCheck is not used when QHint is enabled, and hence Update event isn't necessary. - nodeActionType = framework.Add | framework.UpdateNodeAllocatable - } - - return []framework.ClusterEventWithHint{ - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodEvent}, - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}, QueueingHintFn: f.isSchedulableAfterNodeChange}, - }, nil -} - -// isSchedulableAfterPodEvent is invoked whenever a pod deleted or scaled down. It checks whether -// that change made a previously unschedulable pod schedulable. 
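
// A minimal sketch of the request-aggregation rule worked through in the
// computePodResourceRequest example above: init containers run sequentially
// (take the max), regular containers run concurrently (take the sum), then
// pod overhead is added. One resource dimension at a time, plain int64s
// instead of the resource helpers.
package main

import "fmt"

func effectiveRequest(initContainers, containers []int64, overhead int64) int64 {
	var initMax, sum int64
	for _, r := range initContainers {
		if r > initMax {
			initMax = r
		}
	}
	for _, r := range containers {
		sum += r
	}
	req := sum
	if initMax > req {
		req = initMax
	}
	return req + overhead
}

func main() {
	// Mirrors the comment's example: IC1=2, IC2=2, C1=2, C2=1 -> CPU 3.
	fmt.Println(effectiveRequest([]int64{2, 2}, []int64{2, 1}, 0)) // 3
}
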
-func (f *Fit) isSchedulableAfterPodEvent(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalPod, modifiedPod, err := schedutil.As[*v1.Pod](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - if modifiedPod == nil { - if originalPod.Spec.NodeName == "" && originalPod.Status.NominatedNodeName == "" { - logger.V(5).Info("the deleted pod was unscheduled and it wouldn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod)) - return framework.QueueSkip, nil - } - - // any deletion event to a scheduled pod could make the unscheduled pod schedulable. - logger.V(5).Info("another scheduled pod was deleted, and it may make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod)) - return framework.Queue, nil - } - - if !f.enableInPlacePodVerticalScaling { - // If InPlacePodVerticalScaling (KEP 1287) is disabled, the pod scale down event cannot free up any resources. - logger.V(5).Info("another pod was modified, but InPlacePodVerticalScaling is disabled, so it doesn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - - if !f.isSchedulableAfterPodScaleDown(pod, originalPod, modifiedPod) { - if loggerV := logger.V(10); loggerV.Enabled() { - // Log more information. - loggerV.Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", cmp.Diff(originalPod, modifiedPod)) - } else { - logger.V(5).Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - } - return framework.QueueSkip, nil - } - - logger.V(5).Info("another scheduled pod or the target pod itself got scaled down, and it may make the unscheduled pod schedulable", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.Queue, nil -} - -// isSchedulableAfterPodScaleDown checks whether the scale down event may make the target pod schedulable. Specifically: -// - Returns true when the update event is for the target pod itself. -// - Returns true when the update event shows a scheduled pod's resource request that the target pod also requests got reduced. -func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool { - if modifiedPod.UID == targetPod.UID { - // If the scaling down event is for targetPod, it would make targetPod schedulable. - return true - } - - if modifiedPod.Spec.NodeName == "" { - // If the update event is for a unscheduled Pod, - // it wouldn't make targetPod schedulable. - return false - } - - // the other pod was scheduled, so modification or deletion may free up some resources. - originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{} - originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})) - modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})) - - // check whether the resource request of the modified pod is less than the original pod. 
- podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}) - for rName, rValue := range podRequests { - if rValue.IsZero() { - // We only care about the resources requested by the pod we are trying to schedule. - continue - } - switch rName { - case v1.ResourceCPU: - if originalMaxResourceReq.MilliCPU > modifiedMaxResourceReq.MilliCPU { - return true - } - case v1.ResourceMemory: - if originalMaxResourceReq.Memory > modifiedMaxResourceReq.Memory { - return true - } - case v1.ResourceEphemeralStorage: - if originalMaxResourceReq.EphemeralStorage > modifiedMaxResourceReq.EphemeralStorage { - return true - } - default: - if schedutil.IsScalarResourceName(rName) && originalMaxResourceReq.ScalarResources[rName] > modifiedMaxResourceReq.ScalarResources[rName] { - return true - } - } - } - return false -} - -// isSchedulableAfterNodeChange is invoked whenever a node added or changed. It checks whether -// that change could make a previously unschedulable pod schedulable. -func (f *Fit) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalNode, modifiedNode, err := schedutil.As[*v1.Node](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - // Leaving in the queue, since the pod won't fit into the modified node anyway. - if !isFit(pod, modifiedNode, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources}) { - logger.V(5).Info("node was created or updated, but it doesn't have enough resource(s) to accommodate this pod", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil - } - // The pod will fit, so since it's add, unblock scheduling. - if originalNode == nil { - logger.V(5).Info("node was added and it might fit the pod's resource requests", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - // The pod will fit, but since there was no increase in available resources, the change won't make the pod schedulable. - if !haveAnyRequestedResourcesIncreased(pod, originalNode, modifiedNode, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources}) { - logger.V(5).Info("node was updated, but haven't changed the pod's resource requestments fit assessment", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil - } - - logger.V(5).Info("node was updated, and may now fit the pod's resource requests", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil -} - -// haveAnyRequestedResourcesIncreased returns true if any of the resources requested by the pod have increased or if allowed pod number increased. 
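
// A simplified sketch of the scale-down relevance test applied above: a
// requeue is only worthwhile when a resource that the pending pod actually
// requests shrank on another scheduled pod. Resources are flattened to a
// name -> quantity map here, which glosses over the per-kind fields of
// framework.Resource.
package main

import "fmt"

func freedRelevantResource(target, original, modified map[string]int64) bool {
	for name, qty := range target {
		if qty == 0 {
			continue // ignore resources the pending pod doesn't ask for
		}
		if original[name] > modified[name] {
			return true
		}
	}
	return false
}

func main() {
	target := map[string]int64{"cpu": 500}
	original := map[string]int64{"cpu": 2000, "memory": 1 << 30}
	modified := map[string]int64{"cpu": 1000, "memory": 1 << 30}
	fmt.Println(freedRelevantResource(target, original, modified)) // true: cpu shrank
}
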
-func haveAnyRequestedResourcesIncreased(pod *v1.Pod, originalNode, modifiedNode *v1.Node, opts ResourceRequestsOptions) bool { - podRequest := computePodResourceRequest(pod, opts) - originalNodeInfo := framework.NewNodeInfo() - originalNodeInfo.SetNode(originalNode) - modifiedNodeInfo := framework.NewNodeInfo() - modifiedNodeInfo.SetNode(modifiedNode) - - if modifiedNodeInfo.Allocatable.AllowedPodNumber > originalNodeInfo.Allocatable.AllowedPodNumber { - return true - } - - if podRequest.MilliCPU == 0 && - podRequest.Memory == 0 && - podRequest.EphemeralStorage == 0 && - len(podRequest.ScalarResources) == 0 { - return false - } - - if (podRequest.MilliCPU > 0 && modifiedNodeInfo.Allocatable.MilliCPU > originalNodeInfo.Allocatable.MilliCPU) || - (podRequest.Memory > 0 && modifiedNodeInfo.Allocatable.Memory > originalNodeInfo.Allocatable.Memory) || - (podRequest.EphemeralStorage > 0 && modifiedNodeInfo.Allocatable.EphemeralStorage > originalNodeInfo.Allocatable.EphemeralStorage) { - return true - } - - for rName, rQuant := range podRequest.ScalarResources { - // Skip in case request quantity is zero - if rQuant == 0 { - continue - } - - if modifiedNodeInfo.Allocatable.ScalarResources[rName] > originalNodeInfo.Allocatable.ScalarResources[rName] { - return true - } - } - return false -} - -// isFit checks if the pod fits the node. If the node is nil, it returns false. -// It constructs a fake NodeInfo object for the node and checks if the pod fits the node. -func isFit(pod *v1.Pod, node *v1.Node, opts ResourceRequestsOptions) bool { - if node == nil { - return false - } - nodeInfo := framework.NewNodeInfo() - nodeInfo.SetNode(node) - return len(Fits(pod, nodeInfo, opts)) == 0 -} - -// Filter invoked at the filter extension point. -// Checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod. -// It returns a list of insufficient resources, if empty, then the node has all the resources requested by the pod. -func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - s, err := getPreFilterState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - - insufficientResources := fitsRequest(s, nodeInfo, f.ignoredResources, f.ignoredResourceGroups) - - if len(insufficientResources) != 0 { - // We will keep all failure reasons. - failureReasons := make([]string, 0, len(insufficientResources)) - for i := range insufficientResources { - failureReasons = append(failureReasons, insufficientResources[i].Reason) - } - return framework.NewStatus(framework.Unschedulable, failureReasons...) - } - return nil -} - -func hasRestartableInitContainer(pod *v1.Pod) bool { - for _, c := range pod.Spec.InitContainers { - if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways { - return true - } - } - return false -} - -// InsufficientResource describes what kind of resource limit is hit and caused the pod to not fit the node. -type InsufficientResource struct { - ResourceName v1.ResourceName - // We explicitly have a parameter for reason to avoid formatting a message on the fly - // for common resources, which is expensive for cluster autoscaler simulations. - Reason string - Requested int64 - Used int64 - Capacity int64 -} - -// Fits checks if node have enough resources to host the pod. 
-func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, opts ResourceRequestsOptions) []InsufficientResource { - return fitsRequest(computePodResourceRequest(pod, opts), nodeInfo, nil, nil) -} - -func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.Set[string]) []InsufficientResource { - insufficientResources := make([]InsufficientResource, 0, 4) - - allowedPodNumber := nodeInfo.Allocatable.AllowedPodNumber - if len(nodeInfo.Pods)+1 > allowedPodNumber { - insufficientResources = append(insufficientResources, InsufficientResource{ - ResourceName: v1.ResourcePods, - Reason: "Too many pods", - Requested: 1, - Used: int64(len(nodeInfo.Pods)), - Capacity: int64(allowedPodNumber), - }) - } - - if podRequest.MilliCPU == 0 && - podRequest.Memory == 0 && - podRequest.EphemeralStorage == 0 && - len(podRequest.ScalarResources) == 0 { - return insufficientResources - } - - if podRequest.MilliCPU > 0 && podRequest.MilliCPU > (nodeInfo.Allocatable.MilliCPU-nodeInfo.Requested.MilliCPU) { - insufficientResources = append(insufficientResources, InsufficientResource{ - ResourceName: v1.ResourceCPU, - Reason: "Insufficient cpu", - Requested: podRequest.MilliCPU, - Used: nodeInfo.Requested.MilliCPU, - Capacity: nodeInfo.Allocatable.MilliCPU, - }) - } - if podRequest.Memory > 0 && podRequest.Memory > (nodeInfo.Allocatable.Memory-nodeInfo.Requested.Memory) { - insufficientResources = append(insufficientResources, InsufficientResource{ - ResourceName: v1.ResourceMemory, - Reason: "Insufficient memory", - Requested: podRequest.Memory, - Used: nodeInfo.Requested.Memory, - Capacity: nodeInfo.Allocatable.Memory, - }) - } - if podRequest.EphemeralStorage > 0 && - podRequest.EphemeralStorage > (nodeInfo.Allocatable.EphemeralStorage-nodeInfo.Requested.EphemeralStorage) { - insufficientResources = append(insufficientResources, InsufficientResource{ - ResourceName: v1.ResourceEphemeralStorage, - Reason: "Insufficient ephemeral-storage", - Requested: podRequest.EphemeralStorage, - Used: nodeInfo.Requested.EphemeralStorage, - Capacity: nodeInfo.Allocatable.EphemeralStorage, - }) - } - - for rName, rQuant := range podRequest.ScalarResources { - // Skip in case request quantity is zero - if rQuant == 0 { - continue - } - - if v1helper.IsExtendedResourceName(rName) { - // If this resource is one of the extended resources that should be ignored, we will skip checking it. - // rName is guaranteed to have a slash due to API validation. - var rNamePrefix string - if ignoredResourceGroups.Len() > 0 { - rNamePrefix = strings.Split(string(rName), "/")[0] - } - if ignoredExtendedResources.Has(string(rName)) || ignoredResourceGroups.Has(rNamePrefix) { - continue - } - } - - if rQuant > (nodeInfo.Allocatable.ScalarResources[rName] - nodeInfo.Requested.ScalarResources[rName]) { - insufficientResources = append(insufficientResources, InsufficientResource{ - ResourceName: rName, - Reason: fmt.Sprintf("Insufficient %v", rName), - Requested: podRequest.ScalarResources[rName], - Used: nodeInfo.Requested.ScalarResources[rName], - Capacity: nodeInfo.Allocatable.ScalarResources[rName], - }) - } - } - - return insufficientResources -} - -// Score invoked at the Score extension point. 
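
// A standalone sketch of the core headroom test in fitsRequest above: a
// dimension is insufficient when the request exceeds allocatable minus what
// is already requested on the node. The ignored-resource and pod-count
// handling of the real function are omitted.
package main

import "fmt"

type insufficient struct {
	name                      string
	requested, used, capacity int64
}

func check(request, used, allocatable map[string]int64) []insufficient {
	var out []insufficient
	for name, req := range request {
		if req == 0 {
			continue
		}
		if req > allocatable[name]-used[name] {
			out = append(out, insufficient{name, req, used[name], allocatable[name]})
		}
	}
	return out
}

func main() {
	req := map[string]int64{"cpu": 1500, "memory": 2 << 30}
	used := map[string]int64{"cpu": 3000, "memory": 4 << 30}
	alloc := map[string]int64{"cpu": 4000, "memory": 16 << 30}
	fmt.Printf("%+v\n", check(req, used, alloc)) // only cpu fails: headroom is 1000
}
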
-func (f *Fit) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - s, err := getPreScoreState(state) - if err != nil { - s = &preScoreState{ - podRequests: f.calculatePodResourceRequestList(pod, f.resources), - } - } - - return f.score(ctx, pod, nodeInfo, s.podRequests) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go deleted file mode 100644 index a19969e90de..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/least_allocated.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderesources - -import ( - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// leastResourceScorer favors nodes with fewer requested resources. -// It calculates the percentage of memory, CPU and other resources requested by pods scheduled on the node, and -// prioritizes based on the minimum of the average of the fraction of requested to capacity. -// -// Details: -// (cpu((capacity-requested)*MaxNodeScore*cpuWeight/capacity) + memory((capacity-requested)*MaxNodeScore*memoryWeight/capacity) + ...)/weightSum -func leastResourceScorer(resources []config.ResourceSpec) func([]int64, []int64) int64 { - return func(requested, allocable []int64) int64 { - var nodeScore, weightSum int64 - for i := range requested { - if allocable[i] == 0 { - continue - } - weight := resources[i].Weight - resourceScore := leastRequestedScore(requested[i], allocable[i]) - nodeScore += resourceScore * weight - weightSum += weight - } - if weightSum == 0 { - return 0 - } - return nodeScore / weightSum - } -} - -// The unused capacity is calculated on a scale of 0-MaxNodeScore -// 0 being the lowest priority and `MaxNodeScore` being the highest. -// The more unused resources the higher the score is. -func leastRequestedScore(requested, capacity int64) int64 { - if capacity == 0 { - return 0 - } - if requested > capacity { - return 0 - } - - return ((capacity - requested) * framework.MaxNodeScore) / capacity -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go deleted file mode 100644 index 41425698835..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderesources - -import ( - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// mostResourceScorer favors nodes with most requested resources. -// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes -// based on the maximum of the average of the fraction of requested to capacity. -// -// Details: -// (cpu(MaxNodeScore * requested * cpuWeight / capacity) + memory(MaxNodeScore * requested * memoryWeight / capacity) + ...) / weightSum -func mostResourceScorer(resources []config.ResourceSpec) func(requested, allocable []int64) int64 { - return func(requested, allocable []int64) int64 { - var nodeScore, weightSum int64 - for i := range requested { - if allocable[i] == 0 { - continue - } - weight := resources[i].Weight - resourceScore := mostRequestedScore(requested[i], allocable[i]) - nodeScore += resourceScore * weight - weightSum += weight - } - if weightSum == 0 { - return 0 - } - return nodeScore / weightSum - } -} - -// The used capacity is calculated on a scale of 0-MaxNodeScore (MaxNodeScore is -// constant with value set to 100). -// 0 being the lowest priority and 100 being the highest. -// The more resources are used the higher the score is. This function -// is almost a reversed version of noderesources.leastRequestedScore. -func mostRequestedScore(requested, capacity int64) int64 { - if capacity == 0 { - return 0 - } - if requested > capacity { - // `requested` might be greater than `capacity` because pods with no - // requests get minimum values. - requested = capacity - } - - return (requested * framework.MaxNodeScore) / capacity -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go deleted file mode 100644 index df5e2ff4733..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderesources - -import ( - "math" - - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" -) - -const maxUtilization = 100 - -// buildRequestedToCapacityRatioScorerFunction allows users to apply bin packing -// on core resources like CPU, Memory as well as extended resources like accelerators. 
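
// The two per-resource scores above are mirror images; this self-contained
// sketch shows both, including the clamping guards for zero capacity and
// over-committed requests.
package main

import "fmt"

const maxNodeScore = 100

// leastRequested rewards free capacity (spreading).
func leastRequested(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * maxNodeScore / capacity
}

// mostRequested rewards used capacity (bin packing).
func mostRequested(requested, capacity int64) int64 {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		requested = capacity // pods with no requests get minimum values
	}
	return requested * maxNodeScore / capacity
}

func main() {
	fmt.Println(leastRequested(250, 1000)) // 75: the mostly idle node wins
	fmt.Println(mostRequested(250, 1000))  // 25: the packed node wins instead
}
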
-func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.FunctionShape, resources []config.ResourceSpec) func([]int64, []int64) int64 { - rawScoringFunction := helper.BuildBrokenLinearFunction(scoringFunctionShape) - resourceScoringFunction := func(requested, capacity int64) int64 { - if capacity == 0 || requested > capacity { - return rawScoringFunction(maxUtilization) - } - - return rawScoringFunction(requested * maxUtilization / capacity) - } - return func(requested, allocable []int64) int64 { - var nodeScore, weightSum int64 - for i := range requested { - if allocable[i] == 0 { - continue - } - weight := resources[i].Weight - resourceScore := resourceScoringFunction(requested[i], allocable[i]) - if resourceScore > 0 { - nodeScore += resourceScore * weight - weightSum += weight - } - } - if weightSum == 0 { - return 0 - } - return int64(math.Round(float64(nodeScore) / float64(weightSum))) - } -} - -func requestedToCapacityRatioScorer(resources []config.ResourceSpec, shape []config.UtilizationShapePoint) func([]int64, []int64) int64 { - shapes := make([]helper.FunctionShapePoint, 0, len(shape)) - for _, point := range shape { - shapes = append(shapes, helper.FunctionShapePoint{ - Utilization: int64(point.Utilization), - // MaxCustomPriorityScore may diverge from the max score used in the scheduler and defined by MaxNodeScore, - // therefore we need to scale the score returned by requested to capacity ratio to the score range - // used by the scheduler. - Score: int64(point.Score) * (framework.MaxNodeScore / config.MaxCustomPriorityScore), - }) - } - - return buildRequestedToCapacityRatioScorerFunction(shapes, resources) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go deleted file mode 100644 index 71eb9138bd4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderesources - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/klog/v2" - - resourcehelper "k8s.io/component-helpers/resource" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/framework" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" -) - -// scorer is decorator for resourceAllocationScorer -type scorer func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer - -// resourceAllocationScorer contains information to calculate resource allocation score. -type resourceAllocationScorer struct { - Name string - enableInPlacePodVerticalScaling bool - enablePodLevelResources bool - // used to decide whether to use Requested or NonZeroRequested for - // cpu and memory. 
- useRequested bool - scorer func(requested, allocable []int64) int64 - resources []config.ResourceSpec -} - -// score will use `scorer` function to calculate the score. -func (r *resourceAllocationScorer) score( - ctx context.Context, - pod *v1.Pod, - nodeInfo *framework.NodeInfo, - podRequests []int64) (int64, *framework.Status) { - logger := klog.FromContext(ctx) - node := nodeInfo.Node() - - // resources not set, nothing scheduled, - if len(r.resources) == 0 { - return 0, framework.NewStatus(framework.Error, "resources not found") - } - - requested := make([]int64, len(r.resources)) - allocatable := make([]int64, len(r.resources)) - for i := range r.resources { - alloc, req := r.calculateResourceAllocatableRequest(logger, nodeInfo, v1.ResourceName(r.resources[i].Name), podRequests[i]) - // Only fill the extended resource entry when it's non-zero. - if alloc == 0 { - continue - } - allocatable[i] = alloc - requested[i] = req - } - - score := r.scorer(requested, allocatable) - - if loggerV := logger.V(10); loggerV.Enabled() { // Serializing these maps is costly. - loggerV.Info("Listed internal info for allocatable resources, requested resources and score", "pod", - klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name, - "allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score, - ) - } - - return score, nil -} - -// calculateResourceAllocatableRequest returns 2 parameters: -// - 1st param: quantity of allocatable resource on the node. -// - 2nd param: aggregated quantity of requested resource on the node. -// Note: if it's an extended resource, and the pod doesn't request it, (0, 0) is returned. -func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger klog.Logger, nodeInfo *framework.NodeInfo, resource v1.ResourceName, podRequest int64) (int64, int64) { - requested := nodeInfo.NonZeroRequested - if r.useRequested { - requested = nodeInfo.Requested - } - - // If it's an extended resource, and the pod doesn't request it. We return (0, 0) - // as an implication to bypass scoring on this resource. - if podRequest == 0 && schedutil.IsScalarResourceName(resource) { - return 0, 0 - } - switch resource { - case v1.ResourceCPU: - return nodeInfo.Allocatable.MilliCPU, (requested.MilliCPU + podRequest) - case v1.ResourceMemory: - return nodeInfo.Allocatable.Memory, (requested.Memory + podRequest) - case v1.ResourceEphemeralStorage: - return nodeInfo.Allocatable.EphemeralStorage, (nodeInfo.Requested.EphemeralStorage + podRequest) - default: - if _, exists := nodeInfo.Allocatable.ScalarResources[resource]; exists { - return nodeInfo.Allocatable.ScalarResources[resource], (nodeInfo.Requested.ScalarResources[resource] + podRequest) - } - } - logger.V(10).Info("Requested resource is omitted for node score calculation", "resourceName", resource) - return 0, 0 -} - -// calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod -// the Overhead is added to the result. -func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resourceName v1.ResourceName) int64 { - - opts := resourcehelper.PodResourcesOptions{ - UseStatusResources: r.enableInPlacePodVerticalScaling, - // SkipPodLevelResources is set to false when PodLevelResources feature is enabled. 
- SkipPodLevelResources: !r.enablePodLevelResources, - } - - if !r.useRequested { - opts.NonMissingContainerRequests = v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI), - } - } - - requests := resourcehelper.PodRequests(pod, opts) - - quantity := requests[resourceName] - if resourceName == v1.ResourceCPU { - return quantity.MilliValue() - } - return quantity.Value() -} - -func (r *resourceAllocationScorer) calculatePodResourceRequestList(pod *v1.Pod, resources []config.ResourceSpec) []int64 { - podRequests := make([]int64, len(resources)) - for i := range resources { - podRequests[i] = r.calculatePodResourceRequest(pod, v1.ResourceName(resources[i].Name)) - } - return podRequests -} - -func (r *resourceAllocationScorer) isBestEffortPod(podRequests []int64) bool { - for _, request := range podRequests { - if request != 0 { - return false - } - } - return true -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/test_util.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/test_util.go deleted file mode 100644 index fd4345117b8..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/test_util.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package noderesources - -import ( - "github.com/google/go-cmp/cmp/cmpopts" //nolint:depguard - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -var ( - ignoreBadValueDetail = cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail") - defaultResources = []config.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, - } - extendedRes = "abc.com/xyz" - extendedResourceSet = []config.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, - {Name: extendedRes, Weight: 1}, - } -) - -func makeNode(node string, milliCPU, memory int64, extendedResource map[string]int64) *v1.Node { - resourceList := make(map[v1.ResourceName]resource.Quantity) - for res, quantity := range extendedResource { - resourceList[v1.ResourceName(res)] = *resource.NewQuantity(quantity, resource.DecimalSI) - } - resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(milliCPU, resource.DecimalSI) - resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI) - return &v1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: node}, - Status: v1.NodeStatus{ - Capacity: resourceList, - Allocatable: resourceList, - }, - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go deleted file mode 100644 index 50a4264aa1a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodeunschedulable - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - v1helper "k8s.io/component-helpers/scheduling/corev1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// NodeUnschedulable plugin filters nodes that set node.Spec.Unschedulable=true unless -// the pod tolerates {key=node.kubernetes.io/unschedulable, effect:NoSchedule} taint. -type NodeUnschedulable struct { - enableSchedulingQueueHint bool -} - -var _ framework.FilterPlugin = &NodeUnschedulable{} -var _ framework.EnqueueExtensions = &NodeUnschedulable{} - -// Name is the name of the plugin used in the plugin registry and configurations. -const Name = names.NodeUnschedulable - -const ( - // ErrReasonUnknownCondition is used for NodeUnknownCondition predicate error. - ErrReasonUnknownCondition = "node(s) had unknown conditions" - // ErrReasonUnschedulable is used for NodeUnschedulable predicate error. 
- ErrReasonUnschedulable = "node(s) were unschedulable"
-)
-
-// EventsToRegister returns the possible events that may make a Pod
-// failed by this plugin schedulable.
-func (pl *NodeUnschedulable) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) {
- if !pl.enableSchedulingQueueHint {
- return []framework.ClusterEventWithHint{
- // A note about UpdateNodeLabel event:
- // Ideally, it's supposed to register only Add | UpdateNodeTaint because UpdateNodeLabel will never change the result from this plugin.
- // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
- // See: https://github.com/kubernetes/kubernetes/issues/109437
- {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeTaint | framework.UpdateNodeLabel}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
- }, nil
- }
-
- return []framework.ClusterEventWithHint{
- // When QueueingHint is enabled, we don't use preCheck and we don't need to register UpdateNodeLabel event.
- {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeTaint}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
- // When the QueueingHint feature is enabled,
- // the scheduling queue uses Pod/Update Queueing Hint
- // to determine whether a Pod's update makes the Pod schedulable or not.
- // https://github.com/kubernetes/kubernetes/pull/122234
- {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.UpdatePodToleration}, QueueingHintFn: pl.isSchedulableAfterPodTolerationChange},
- }, nil
-}
-
-// isSchedulableAfterPodTolerationChange is invoked whenever a pod's tolerations change.
-func (pl *NodeUnschedulable) isSchedulableAfterPodTolerationChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
- _, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj)
- if err != nil {
- return framework.Queue, err
- }
-
- if pod.UID == modifiedPod.UID {
- // Note: we don't need to check whether the old Pod's tolerations tolerate the taint because:
- // - Taint can be added, but can't be modified nor removed.
- // - If the Pod already has the toleration, it shouldn't have been rejected by this plugin in the first place.
- // Meaning, here this Pod has been rejected by this plugin, and hence it shouldn't have the toleration yet.
- if v1helper.TolerationsTolerateTaint(modifiedPod.Spec.Tolerations, &v1.Taint{
- Key: v1.TaintNodeUnschedulable,
- Effect: v1.TaintEffectNoSchedule,
- }) {
- // This update makes the pod tolerate the unschedulable taint.
- logger.V(5).Info("a new toleration is added for the unschedulable Pod, and it may make it schedulable", "pod", klog.KObj(modifiedPod))
- return framework.Queue, nil
- }
- logger.V(5).Info("a new toleration is added for the unschedulable Pod, but it's an unrelated toleration", "pod", klog.KObj(modifiedPod))
- return framework.QueueSkip, nil
- }
-
- logger.V(5).Info("a new toleration is added for a Pod, but it's an unrelated Pod and wouldn't change the NodeUnschedulable plugin's decision", "pod", klog.KObj(modifiedPod))
-
- return framework.QueueSkip, nil
-}
-
-// isSchedulableAfterNodeChange is invoked for all node events reported by
-// an informer. It checks whether that change made a previously unschedulable
-// pod schedulable.
-func (pl *NodeUnschedulable) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
- originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj)
- if err != nil {
- return framework.Queue, err
- }
-
- // We queue this Pod when:
- // 1. the node is updated from unschedulable to schedulable.
- // 2. the node is added and is schedulable.
- if (originalNode != nil && originalNode.Spec.Unschedulable && !modifiedNode.Spec.Unschedulable) ||
- (originalNode == nil && !modifiedNode.Spec.Unschedulable) {
- logger.V(5).Info("node was created or updated, pod may be schedulable now", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
- return framework.Queue, nil
- }
-
- logger.V(5).Info("node was created or updated, but it doesn't make this pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
- return framework.QueueSkip, nil
-}
-
-// Name returns name of the plugin. It is used in logs, etc.
-func (pl *NodeUnschedulable) Name() string {
- return Name
-}
-
-// Filter invoked at the filter extension point.
-func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
- node := nodeInfo.Node()
-
- if !node.Spec.Unschedulable {
- return nil
- }
-
- // If the pod tolerates the unschedulable taint, it also tolerates `node.Spec.Unschedulable`.
- podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{
- Key: v1.TaintNodeUnschedulable,
- Effect: v1.TaintEffectNoSchedule,
- })
- if !podToleratesUnschedulable {
- return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonUnschedulable)
- }
-
- return nil
-}
-
-// New initializes a new plugin and returns it.
-func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) {
- return &NodeUnschedulable{enableSchedulingQueueHint: fts.EnableSchedulingQueueHint}, nil
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/OWNERS
deleted file mode 100644
index be3245e85e1..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/OWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
- - sig-storage-approvers
- - cofyc
-reviewers:
- - sig-storage-reviewers
- - cofyc
-labels:
- - sig/storage
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
deleted file mode 100644
index 1a3f12d0363..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nodevolumelimits
-
-import (
- "context"
- "fmt"
-
- v1 "k8s.io/api/core/v1"
- storagev1 "k8s.io/api/storage/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/rand"
- corelisters "k8s.io/client-go/listers/core/v1"
- storagelisters "k8s.io/client-go/listers/storage/v1"
- ephemeral "k8s.io/component-helpers/storage/ephemeral"
- storagehelpers "k8s.io/component-helpers/storage/volume"
- csitrans "k8s.io/csi-translation-lib"
- "k8s.io/klog/v2"
- "k8s.io/kubernetes/pkg/scheduler/framework"
- "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
- "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
- "k8s.io/kubernetes/pkg/scheduler/util"
-)
-
-const (
- // ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
- ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"
-)
-
-// InTreeToCSITranslator contains methods required to check migratable status
-// and perform translations from in-tree PVs to CSI.
-type InTreeToCSITranslator interface {
- IsPVMigratable(pv *v1.PersistentVolume) bool
- IsInlineMigratable(vol *v1.Volume) bool
- IsMigratableIntreePluginByName(inTreePluginName string) bool
- GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error)
- GetCSINameFromInTreeName(pluginName string) (string, error)
- TranslateInTreePVToCSI(logger klog.Logger, pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
- TranslateInTreeInlineVolumeToCSI(logger klog.Logger, volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error)
-}
-
-// CSILimits is a plugin that checks node volume limits.
-type CSILimits struct {
- csiNodeLister storagelisters.CSINodeLister
- pvLister corelisters.PersistentVolumeLister
- pvcLister corelisters.PersistentVolumeClaimLister
- scLister storagelisters.StorageClassLister
- vaLister storagelisters.VolumeAttachmentLister
-
- enableCSIMigrationPortworx bool
- randomVolumeIDPrefix string
-
- translator InTreeToCSITranslator
-}
-
-var _ framework.PreFilterPlugin = &CSILimits{}
-var _ framework.FilterPlugin = &CSILimits{}
-var _ framework.EnqueueExtensions = &CSILimits{}
-
-// CSIName is the name of the plugin used in the plugin registry and configurations.
-const CSIName = names.NodeVolumeLimits
-
-// Name returns name of the plugin. It is used in logs, etc.
-func (pl *CSILimits) Name() string {
- return CSIName
-}
-
-// EventsToRegister returns the possible events that may make a Pod
-// failed by this plugin schedulable.
-func (pl *CSILimits) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) {
- return []framework.ClusterEventWithHint{
- // We don't register any `QueueingHintFn` intentionally
- // because any new CSINode could make pods that were rejected by CSI volumes schedulable.
- {Event: framework.ClusterEvent{Resource: framework.CSINode, ActionType: framework.Add}},
- {Event: framework.ClusterEvent{Resource: framework.CSINode, ActionType: framework.Update}, QueueingHintFn: pl.isSchedulableAfterCSINodeUpdated},
- {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}, QueueingHintFn: pl.isSchedulableAfterPodDeleted},
- {Event: framework.ClusterEvent{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}, QueueingHintFn: pl.isSchedulableAfterPVCAdded},
- {Event: framework.ClusterEvent{Resource: framework.VolumeAttachment, ActionType: framework.Delete}, QueueingHintFn: pl.isSchedulableAfterVolumeAttachmentDeleted},
- }, nil
-}
-
-func (pl *CSILimits) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
- deletedPod, _, err := util.As[*v1.Pod](oldObj, newObj)
- if err != nil {
- return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPodDeleted: %w", err)
- }
-
- if len(deletedPod.Spec.Volumes) == 0 {
- return framework.QueueSkip, nil
- }
-
- if deletedPod.Spec.NodeName == "" && deletedPod.Status.NominatedNodeName == "" {
- return framework.QueueSkip, nil
- }
-
- for _, vol := range deletedPod.Spec.Volumes {
- if vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil || pl.translator.IsInlineMigratable(&vol) {
- return framework.Queue, nil
- }
- }
-
- logger.V(5).Info("The deleted pod does not impact the scheduling of the unscheduled pod", "deletedPod", klog.KObj(deletedPod), "pod", klog.KObj(pod))
- return framework.QueueSkip, nil
-}
-
-func (pl *CSILimits) isSchedulableAfterPVCAdded(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
- _, addedPvc, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
- if err != nil {
- return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPVCAdded: %w", err)
- }
-
- if addedPvc.Namespace != pod.Namespace {
- return framework.QueueSkip, nil
- }
-
- for _, volumes := range pod.Spec.Volumes {
- var pvcName string
- switch {
- case volumes.PersistentVolumeClaim != nil:
- pvcName = volumes.PersistentVolumeClaim.ClaimName
- case volumes.Ephemeral != nil:
- pvcName = ephemeral.VolumeClaimName(pod, &volumes)
- default:
- // Volume is not using a PVC, ignore
- continue
- }
-
- if pvcName == addedPvc.Name {
- logger.V(5).Info("PVC that is referred to by the pod was created, which might make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(addedPvc))
- return framework.Queue, nil
- }
- }
-
- logger.V(5).Info("PVC irrelevant to the Pod was created, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(addedPvc))
- return framework.QueueSkip, nil
-}
-
-func (pl *CSILimits) isSchedulableAfterVolumeAttachmentDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
- deletedVolumeAttachment, _, err := util.As[*storagev1.VolumeAttachment](oldObj, newObj)
- if err != nil {
- return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterVolumeAttachmentDeleted: %w", err)
- }
-
- for _, vol := range pod.Spec.Volumes {
- // Check if the pod volume uses a PVC
- // If it does, return Queue
- if vol.PersistentVolumeClaim != nil {
- logger.V(5).Info("Pod volume uses PersistentVolumeClaim, which might make this pod schedulable due to VolumeAttachment deletion", "pod", klog.KObj(pod), "volumeAttachment", klog.KObj(deletedVolumeAttachment), "volume",
vol.Name)
- return framework.Queue, nil
- }
-
- if !pl.translator.IsInlineMigratable(&vol) {
- continue
- }
-
- translatedPV, err := pl.translator.TranslateInTreeInlineVolumeToCSI(logger, &vol, pod.Namespace)
- if err != nil || translatedPV == nil {
- return framework.Queue, fmt.Errorf("converting volume(%s) from inline to csi: %w", vol.Name, err)
- }
-
- if translatedPV.Spec.CSI != nil && deletedVolumeAttachment.Spec.Attacher == translatedPV.Spec.CSI.Driver {
- // deleted VolumeAttachment Attacher matches the translated PV CSI driver
- logger.V(5).Info("Pod volume is an Inline Migratable volume that matches the CSI driver, which might make this pod schedulable due to VolumeAttachment deletion",
- "pod", klog.KObj(pod), "volumeAttachment", klog.KObj(deletedVolumeAttachment),
- "volume", vol.Name, "csiDriver", translatedPV.Spec.CSI.Driver,
- )
- return framework.Queue, nil
- }
- }
-
- logger.V(5).Info("the VolumeAttachment deletion wouldn't make this pod schedulable because the pod has no volume related to a deleted VolumeAttachment",
- "pod", klog.KObj(pod), "volumeAttachment", klog.KObj(deletedVolumeAttachment))
- return framework.QueueSkip, nil
-}
-
-func (pl *CSILimits) isSchedulableAfterCSINodeUpdated(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
- oldCSINode, newCSINode, err := util.As[*storagev1.CSINode](oldObj, newObj)
- if err != nil {
- return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterCSINodeUpdated: %w", err)
- }
-
- oldLimits := make(map[string]int32)
- for _, d := range oldCSINode.Spec.Drivers {
- var count int32
- if d.Allocatable != nil && d.Allocatable.Count != nil {
- count = *d.Allocatable.Count
- }
- oldLimits[d.Name] = count
- }
-
- // Compare new driver limits vs. old. If limit increased, queue pod.
- for _, d := range newCSINode.Spec.Drivers {
- var oldLimit int32
- if val, exists := oldLimits[d.Name]; exists {
- oldLimit = val
- }
- newLimit := int32(0)
- if d.Allocatable != nil && d.Allocatable.Count != nil {
- newLimit = *d.Allocatable.Count
- }
-
- if newLimit > oldLimit {
- logger.V(5).Info("CSINode driver limit increased, might make this pod schedulable",
- "pod", klog.KObj(pod),
- "driver", d.Name,
- "oldLimit", oldLimit,
- "newLimit", newLimit,
- )
- return framework.Queue, nil
- }
- }
-
- // If no driver limit was increased, skip queueing.
- return framework.QueueSkip, nil
-}
-
-// PreFilter invoked at the prefilter extension point
-//
-// If the pod doesn't have any of those volume types, we'll skip the Filter phase
-func (pl *CSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
- volumes := pod.Spec.Volumes
- for i := range volumes {
- vol := &volumes[i]
- if vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil || pl.translator.IsInlineMigratable(vol) {
- return nil, nil
- }
- }
-
- return nil, framework.NewStatus(framework.Skip)
-}
-
-// PreFilterExtensions returns prefilter extensions, pod add and remove.
-func (pl *CSILimits) PreFilterExtensions() framework.PreFilterExtensions {
- return nil
-}
-
-// Filter invoked at the filter extension point.
-func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - // If the new pod doesn't have any volume attached to it, the predicate will always be true - if len(pod.Spec.Volumes) == 0 { - return nil - } - - node := nodeInfo.Node() - - logger := klog.FromContext(ctx) - - csiNode, err := pl.csiNodeLister.Get(node.Name) - if err != nil { - // TODO: return the error once CSINode is created by default (2 releases) - logger.V(5).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) - } - - // Count CSI volumes from the new pod - newVolumes := make(map[string]string) - if err := pl.filterAttachableVolumes(logger, pod, csiNode, true /* new pod */, newVolumes); err != nil { - if apierrors.IsNotFound(err) { - // PVC is not found. This Pod will never be schedulable until PVC is created. - return framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error()) - } - return framework.AsStatus(err) - } - - // If the pod doesn't have any new CSI volumes, the predicate will always be true - if len(newVolumes) == 0 { - return nil - } - - // If the node doesn't have volume limits, the predicate will always be true - nodeVolumeLimits := getVolumeLimits(csiNode) - if len(nodeVolumeLimits) == 0 { - return nil - } - - // Count CSI volumes from existing pods - attachedVolumes := make(map[string]string) - for _, existingPod := range nodeInfo.Pods { - if err := pl.filterAttachableVolumes(logger, existingPod.Pod, csiNode, false /* existing pod */, attachedVolumes); err != nil { - return framework.AsStatus(err) - } - } - - attachedVolumeCount := map[string]int{} - for volumeUniqueName, driverName := range attachedVolumes { - // Don't count single volume used in multiple pods more than once - delete(newVolumes, volumeUniqueName) - attachedVolumeCount[driverName]++ - } - - // Count CSI volumes from VolumeAttachments - volumeAttachments, err := pl.getNodeVolumeAttachmentInfo(logger, node.Name) - if err != nil { - return framework.AsStatus(err) - } - - for volumeUniqueName, driverName := range volumeAttachments { - // Avoid double-counting volumes already used by existing pods - if _, exists := attachedVolumes[volumeUniqueName]; !exists { - attachedVolumeCount[driverName]++ - } - } - - // Count the new volumes count per driver - newVolumeCount := map[string]int{} - for _, driverName := range newVolumes { - newVolumeCount[driverName]++ - } - - for driverName, count := range newVolumeCount { - maxVolumeLimit, ok := nodeVolumeLimits[driverName] - if ok { - currentVolumeCount := attachedVolumeCount[driverName] - logger.V(5).Info("Found plugin volume limits", "node", node.Name, "driverName", driverName, - "maxLimits", maxVolumeLimit, "currentVolumeCount", currentVolumeCount, "newVolumeCount", count, - "pod", klog.KObj(pod)) - if currentVolumeCount+count > int(maxVolumeLimit) { - return framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded) - } - } - } - - return nil -} - -// filterAttachableVolumes filters the attachable volumes from the pod and adds them to the result map. -// The result map is a map of volumeUniqueName to driver name. The volumeUniqueName is a unique name for -// the volume in the format of "driverName/volumeHandle". And driver name is the CSI driver name. 
-func (pl *CSILimits) filterAttachableVolumes(
- logger klog.Logger, pod *v1.Pod, csiNode *storagev1.CSINode, newPod bool, result map[string]string) error {
- for _, vol := range pod.Spec.Volumes {
- pvcName := ""
- isEphemeral := false
- switch {
- case vol.PersistentVolumeClaim != nil:
- // Normal CSI volume can only be used through PVC
- pvcName = vol.PersistentVolumeClaim.ClaimName
- case vol.Ephemeral != nil:
- // Generic ephemeral inline volumes also use a PVC,
- // just with a computed name and certain ownership.
- // That is checked below once the pvc object is
- // retrieved.
- pvcName = ephemeral.VolumeClaimName(pod, &vol)
- isEphemeral = true
- default:
- // Inline Volume does not have PVC.
- // Need to check if CSI migration is enabled for this inline volume.
- // - If the volume is migratable and CSI migration is enabled, need to count it
- // as well.
- // - If the volume is not migratable, it will be counted in the non-CSI filter.
- if err := pl.checkAttachableInlineVolume(logger, &vol, csiNode, pod, result); err != nil {
- return err
- }
-
- continue
- }
-
- if pvcName == "" {
- return fmt.Errorf("PersistentVolumeClaim had no name")
- }
-
- pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
-
- if err != nil {
- if newPod {
- // The PVC is required to proceed with
- // scheduling of a new pod because it cannot
- // run without it. Bail out immediately.
- return fmt.Errorf("looking up PVC %s/%s: %w", pod.Namespace, pvcName, err)
- }
- // If the PVC is invalid, we don't count the volume because
- // there's no guarantee that it belongs to the running predicate.
- logger.V(5).Info("Unable to look up PVC info", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName))
- continue
- }
-
- // The PVC for an ephemeral volume must be owned by the pod.
- if isEphemeral {
- if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
- return err
- }
- }
-
- driverName, volumeHandle := pl.getCSIDriverInfo(logger, csiNode, pvc)
- if driverName == "" || volumeHandle == "" {
- logger.V(5).Info("Could not find a CSI driver name or volume handle, not counting volume")
- continue
- }
-
- volumeUniqueName := getVolumeUniqueName(driverName, volumeHandle)
- result[volumeUniqueName] = driverName
- }
- return nil
-}
-
-// checkAttachableInlineVolume takes an inline volume and adds it to the result map if the
-// volume is migratable and CSI migration for this plugin has been enabled.
-func (pl *CSILimits) checkAttachableInlineVolume(logger klog.Logger, vol *v1.Volume, csiNode *storagev1.CSINode,
- pod *v1.Pod, result map[string]string) error {
- if !pl.translator.IsInlineMigratable(vol) {
- return nil
- }
- // Check if the intree provisioner CSI migration has been enabled.
- inTreeProvisionerName, err := pl.translator.GetInTreePluginNameFromSpec(nil, vol)
- if err != nil {
- return fmt.Errorf("looking up provisioner name for volume %s: %w", vol.Name, err)
- }
- if !isCSIMigrationOn(csiNode, inTreeProvisionerName, pl.enableCSIMigrationPortworx) {
- csiNodeName := ""
- if csiNode != nil {
- csiNodeName = csiNode.Name
- }
- logger.V(5).Info("CSI Migration is not enabled for provisioner", "provisioner", inTreeProvisionerName,
- "pod", klog.KObj(pod), "csiNode", csiNodeName)
- return nil
- }
- // Do translation for the in-tree volume.
- translatedPV, err := pl.translator.TranslateInTreeInlineVolumeToCSI(logger, vol, pod.Namespace)
- if err != nil || translatedPV == nil {
- return fmt.Errorf("converting volume(%s) from inline to csi: %w", vol.Name, err)
- }
- driverName, err := pl.translator.GetCSINameFromInTreeName(inTreeProvisionerName)
- if err != nil {
- return fmt.Errorf("looking up CSI driver name for provisioner %s: %w", inTreeProvisionerName, err)
- }
- // TranslateInTreeInlineVolumeToCSI should translate inline volume to CSI. If it is not set,
- // the volume does not support inline. Skip the count.
- if translatedPV.Spec.PersistentVolumeSource.CSI == nil {
- return nil
- }
- volumeUniqueName := getVolumeUniqueName(driverName, translatedPV.Spec.PersistentVolumeSource.CSI.VolumeHandle)
- result[volumeUniqueName] = driverName
- return nil
-}
-
-// getCSIDriverInfo returns the CSI driver name and volume ID of a given PVC.
-// If the PVC is from a migrated in-tree plugin, this function will return
-// the information of the CSI driver that the plugin has been migrated to.
-func (pl *CSILimits) getCSIDriverInfo(logger klog.Logger, csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
- pvName := pvc.Spec.VolumeName
-
- if pvName == "" {
- logger.V(5).Info("Persistent volume had no name for claim", "PVC", klog.KObj(pvc))
- return pl.getCSIDriverInfoFromSC(logger, csiNode, pvc)
- }
-
- pv, err := pl.pvLister.Get(pvName)
- if err != nil {
- logger.V(5).Info("Unable to look up PV info for PVC and PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvName))
- // If we can't fetch the PV associated with the PVC, maybe it got deleted
- // or the PVC was prebound to a PV that hasn't been created yet.
- // Fall back to using the StorageClass for volume counting.
- return pl.getCSIDriverInfoFromSC(logger, csiNode, pvc)
- }
-
- csiSource := pv.Spec.PersistentVolumeSource.CSI
- if csiSource == nil {
- // We make a fast path for non-CSI volumes that aren't migratable
- if !pl.translator.IsPVMigratable(pv) {
- return "", ""
- }
-
- pluginName, err := pl.translator.GetInTreePluginNameFromSpec(pv, nil)
- if err != nil {
- logger.V(5).Info("Unable to look up plugin name from PV spec", "err", err)
- return "", ""
- }
-
- if !isCSIMigrationOn(csiNode, pluginName, pl.enableCSIMigrationPortworx) {
- logger.V(5).Info("CSI Migration of plugin is not enabled", "plugin", pluginName)
- return "", ""
- }
-
- csiPV, err := pl.translator.TranslateInTreePVToCSI(logger, pv)
- if err != nil {
- logger.V(5).Info("Unable to translate in-tree volume to CSI", "err", err)
- return "", ""
- }
-
- if csiPV.Spec.PersistentVolumeSource.CSI == nil {
- logger.V(5).Info("Unable to get a valid volume source for translated PV", "PV", pvName)
- return "", ""
- }
-
- csiSource = csiPV.Spec.PersistentVolumeSource.CSI
- }
-
- return csiSource.Driver, csiSource.VolumeHandle
-}
-
-// getCSIDriverInfoFromSC returns the CSI driver name and a random volume ID of a given PVC's StorageClass.
-func (pl *CSILimits) getCSIDriverInfoFromSC(logger klog.Logger, csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
- namespace := pvc.Namespace
- pvcName := pvc.Name
- scName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
-
- // If StorageClass is not set or not found, then PVC must be using immediate binding mode
- // and hence it must be bound before scheduling. So it is safe to not count it.
- if scName == "" { - logger.V(5).Info("PVC has no StorageClass", "PVC", klog.KObj(pvc)) - return "", "" - } - - storageClass, err := pl.scLister.Get(scName) - if err != nil { - logger.V(5).Info("Could not get StorageClass for PVC", "PVC", klog.KObj(pvc), "err", err) - return "", "" - } - - // We use random prefix to avoid conflict with volume IDs. If PVC is bound during the execution of the - // predicate and there is another pod on the same node that uses same volume, then we will overcount - // the volume and consider both volumes as different. - volumeHandle := fmt.Sprintf("%s-%s/%s", pl.randomVolumeIDPrefix, namespace, pvcName) - - provisioner := storageClass.Provisioner - if pl.translator.IsMigratableIntreePluginByName(provisioner) { - if !isCSIMigrationOn(csiNode, provisioner, pl.enableCSIMigrationPortworx) { - logger.V(5).Info("CSI Migration of provisioner is not enabled", "provisioner", provisioner) - return "", "" - } - - driverName, err := pl.translator.GetCSINameFromInTreeName(provisioner) - if err != nil { - logger.V(5).Info("Unable to look up driver name from provisioner name", "provisioner", provisioner, "err", err) - return "", "" - } - return driverName, volumeHandle - } - - return provisioner, volumeHandle -} - -// NewCSI initializes a new plugin and returns it. -func NewCSI(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() - pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() - csiNodesLister := informerFactory.Storage().V1().CSINodes().Lister() - scLister := informerFactory.Storage().V1().StorageClasses().Lister() - vaLister := informerFactory.Storage().V1().VolumeAttachments().Lister() - csiTranslator := csitrans.New() - - return &CSILimits{ - csiNodeLister: csiNodesLister, - pvLister: pvLister, - pvcLister: pvcLister, - scLister: scLister, - vaLister: vaLister, - enableCSIMigrationPortworx: fts.EnableCSIMigrationPortworx, - randomVolumeIDPrefix: rand.String(32), - translator: csiTranslator, - }, nil -} - -// getVolumeLimits reads the volume limits from CSINode object and returns a map of volume limits. -// The key is the driver name and the value is the maximum number of volumes that can be attached to the node. -// If a key is not found in the map, it means there is no limit for the driver on the node. -func getVolumeLimits(csiNode *storagev1.CSINode) map[string]int64 { - nodeVolumeLimits := make(map[string]int64) - if csiNode == nil { - return nodeVolumeLimits - } - for _, d := range csiNode.Spec.Drivers { - if d.Allocatable != nil && d.Allocatable.Count != nil { - nodeVolumeLimits[d.Name] = int64(*d.Allocatable.Count) - } - } - return nodeVolumeLimits -} - -// getNodeVolumeAttachmentInfo returns a map of volumeID to driver name for the given node. 
-func (pl *CSILimits) getNodeVolumeAttachmentInfo(logger klog.Logger, nodeName string) (map[string]string, error) { - volumeAttachments := make(map[string]string) - vas, err := pl.vaLister.List(labels.Everything()) - if err != nil { - return nil, err - } - for _, va := range vas { - if va.Spec.NodeName == nodeName { - if va.Spec.Attacher == "" { - logger.V(5).Info("VolumeAttachment has no attacher", "VolumeAttachment", klog.KObj(va)) - continue - } - if va.Spec.Source.PersistentVolumeName == nil { - logger.V(5).Info("VolumeAttachment has no PV name", "VolumeAttachment", klog.KObj(va)) - continue - } - pv, err := pl.pvLister.Get(*va.Spec.Source.PersistentVolumeName) - if err != nil { - logger.V(5).Info("Unable to get PV for VolumeAttachment", "VolumeAttachment", klog.KObj(va), "err", err) - continue - } - if pv.Spec.CSI == nil { - logger.V(5).Info("PV is not a CSI volume", "PV", klog.KObj(pv)) - continue - } - volumeID := getVolumeUniqueName(va.Spec.Attacher, pv.Spec.CSI.VolumeHandle) - volumeAttachments[volumeID] = va.Spec.Attacher - } - } - return volumeAttachments, nil -} - -func getVolumeUniqueName(driverName, volumeHandle string) string { - return fmt.Sprintf("%s/%s", driverName, volumeHandle) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go deleted file mode 100644 index 7cb761379d3..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodevolumelimits - -import ( - "strings" - - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/util/sets" - csilibplugins "k8s.io/csi-translation-lib/plugins" -) - -// isCSIMigrationOn returns a boolean value indicating whether -// the CSI migration has been enabled for a particular storage plugin. -func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string, enableCSIMigrationPortworx bool) bool { - if csiNode == nil || len(pluginName) == 0 { - return false - } - - // In-tree storage to CSI driver migration feature should be enabled, - // along with the plugin-specific one - switch pluginName { - case csilibplugins.AWSEBSInTreePluginName: - return true - case csilibplugins.PortworxVolumePluginName: - if !enableCSIMigrationPortworx { - return false - } - case csilibplugins.GCEPDInTreePluginName: - return true - case csilibplugins.AzureDiskInTreePluginName: - return true - case csilibplugins.CinderInTreePluginName: - return true - default: - return false - } - - // The plugin name should be listed in the CSINode object annotation. - // This indicates that the plugin has been migrated to a CSI driver in the node. 
- csiNodeAnn := csiNode.GetAnnotations() - if csiNodeAnn == nil { - return false - } - - var mpaSet sets.Set[string] - mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey] - if len(mpa) == 0 { - mpaSet = sets.New[string]() - } else { - tok := strings.Split(mpa, ",") - mpaSet = sets.New(tok...) - } - - return mpaSet.Has(pluginName) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/common.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/common.go deleted file mode 100644 index 5896aad65ff..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/common.go +++ /dev/null @@ -1,169 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package podtopologyspread - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - v1helper "k8s.io/component-helpers/scheduling/corev1" - "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" - "k8s.io/utils/ptr" -) - -// topologySpreadConstraint is an internal version for v1.TopologySpreadConstraint -// and where the selector is parsed. -// Fields are exported for comparison during testing. -type topologySpreadConstraint struct { - MaxSkew int32 - TopologyKey string - Selector labels.Selector - MinDomains int32 - NodeAffinityPolicy v1.NodeInclusionPolicy - NodeTaintsPolicy v1.NodeInclusionPolicy -} - -func (tsc *topologySpreadConstraint) matchNodeInclusionPolicies(pod *v1.Pod, node *v1.Node, require nodeaffinity.RequiredNodeAffinity) bool { - if tsc.NodeAffinityPolicy == v1.NodeInclusionPolicyHonor { - // We ignore parsing errors here for backwards compatibility. - if match, _ := require.Match(node); !match { - return false - } - } - - if tsc.NodeTaintsPolicy == v1.NodeInclusionPolicyHonor { - if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc()); untolerated { - return false - } - } - return true -} - -// buildDefaultConstraints builds the constraints for a pod using -// .DefaultConstraints and the selectors from the services, replication -// controllers, replica sets and stateful sets that match the pod. 
-func (pl *PodTopologySpread) buildDefaultConstraints(p *v1.Pod, action v1.UnsatisfiableConstraintAction) ([]topologySpreadConstraint, error) { - constraints, err := pl.filterTopologySpreadConstraints(pl.defaultConstraints, p.Labels, action) - if err != nil || len(constraints) == 0 { - return nil, err - } - selector := helper.DefaultSelector(p, pl.services, pl.replicationCtrls, pl.replicaSets, pl.statefulSets) - if selector.Empty() { - return nil, nil - } - for i := range constraints { - constraints[i].Selector = selector - } - return constraints, nil -} - -// nodeLabelsMatchSpreadConstraints checks if ALL topology keys in spread Constraints are present in node labels. -func nodeLabelsMatchSpreadConstraints(nodeLabels map[string]string, constraints []topologySpreadConstraint) bool { - for _, c := range constraints { - if _, ok := nodeLabels[c.TopologyKey]; !ok { - return false - } - } - return true -} - -func (pl *PodTopologySpread) filterTopologySpreadConstraints(constraints []v1.TopologySpreadConstraint, podLabels map[string]string, action v1.UnsatisfiableConstraintAction) ([]topologySpreadConstraint, error) { - var result []topologySpreadConstraint - for _, c := range constraints { - if c.WhenUnsatisfiable == action { - selector, err := metav1.LabelSelectorAsSelector(c.LabelSelector) - if err != nil { - return nil, err - } - - if pl.enableMatchLabelKeysInPodTopologySpread && len(c.MatchLabelKeys) > 0 { - matchLabels := make(labels.Set) - for _, labelKey := range c.MatchLabelKeys { - if value, ok := podLabels[labelKey]; ok { - matchLabels[labelKey] = value - } - } - if len(matchLabels) > 0 { - selector = mergeLabelSetWithSelector(matchLabels, selector) - } - } - - tsc := topologySpreadConstraint{ - MaxSkew: c.MaxSkew, - TopologyKey: c.TopologyKey, - Selector: selector, - MinDomains: ptr.Deref(c.MinDomains, 1), // If MinDomains is nil, we treat MinDomains as 1. - NodeAffinityPolicy: v1.NodeInclusionPolicyHonor, // If NodeAffinityPolicy is nil, we treat NodeAffinityPolicy as "Honor". - NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore, // If NodeTaintsPolicy is nil, we treat NodeTaintsPolicy as "Ignore". - } - if pl.enableNodeInclusionPolicyInPodTopologySpread { - if c.NodeAffinityPolicy != nil { - tsc.NodeAffinityPolicy = *c.NodeAffinityPolicy - } - if c.NodeTaintsPolicy != nil { - tsc.NodeTaintsPolicy = *c.NodeTaintsPolicy - } - } - result = append(result, tsc) - } - } - return result, nil -} - -func mergeLabelSetWithSelector(matchLabels labels.Set, s labels.Selector) labels.Selector { - mergedSelector := labels.SelectorFromSet(matchLabels) - - requirements, ok := s.Requirements() - if !ok { - return s - } - - for _, r := range requirements { - mergedSelector = mergedSelector.Add(r) - } - - return mergedSelector -} - -func countPodsMatchSelector(podInfos []*framework.PodInfo, selector labels.Selector, ns string) int { - if selector.Empty() { - return 0 - } - count := 0 - for _, p := range podInfos { - // Bypass terminating Pod (see #87621). 
- if p.Pod.DeletionTimestamp != nil || p.Pod.Namespace != ns {
- continue
- }
- if selector.Matches(labels.Set(p.Pod.Labels)) {
- count++
- }
- }
- return count
-}
-
-// podLabelsMatchSpreadConstraints returns whether the labels match the selector in any of the topologySpreadConstraints.
-func podLabelsMatchSpreadConstraints(constraints []topologySpreadConstraint, labels labels.Set) bool {
- for _, c := range constraints {
- if c.Selector.Matches(labels) {
- return true
- }
- }
- return false
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
deleted file mode 100644
index 2b0d5d8f3cf..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package podtopologyspread
-
-import (
- "context"
- "fmt"
- "maps"
- "math"
-
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
- "k8s.io/klog/v2"
- "k8s.io/kubernetes/pkg/scheduler/framework"
-)
-
-const preFilterStateKey = "PreFilter" + Name
-
-// preFilterState computed at PreFilter and used at Filter.
-// It combines CriticalPaths and TpValueToMatchNum to represent:
-// (1) critical paths where the least pods are matched on each spread constraint.
-// (2) number of pods matched on each spread constraint.
-// A nil preFilterState denotes it's not set at all (in PreFilter phase);
-// An empty preFilterState object denotes it's a legit state and is set in PreFilter phase.
-// Fields are exported for comparison during testing.
-type preFilterState struct {
- Constraints []topologySpreadConstraint
- // CriticalPaths is a slice indexed by constraint index.
- // Per each entry, we record 2 critical paths instead of all critical paths.
- // CriticalPaths[i][0].MatchNum always holds the minimum matching number.
- // CriticalPaths[i][1].MatchNum is always greater or equal to CriticalPaths[i][0].MatchNum, but
- // it's not guaranteed to be the 2nd minimum match number.
- CriticalPaths []*criticalPaths
- // TpValueToMatchNum is a slice indexed by constraint index.
- // Each entry is keyed with topology value, and valued with the number of matching pods.
- TpValueToMatchNum []map[string]int
-}
-
-// minMatchNum returns the global minimum for the calculation of skew while taking MinDomains into account.
-func (s *preFilterState) minMatchNum(constraintID int, minDomains int32) (int, error) {
- paths := s.CriticalPaths[constraintID]
-
- minMatchNum := paths[0].MatchNum
- domainsNum := len(s.TpValueToMatchNum[constraintID])
-
- if domainsNum < int(minDomains) {
- // When the number of eligible domains with matching topology keys is less than `minDomains`,
- // it treats "global minimum" as 0.
- minMatchNum = 0
- }
-
- return minMatchNum, nil
-}
-
-// Clone makes a copy of the given state.
-func (s *preFilterState) Clone() framework.StateData {
- if s == nil {
- return nil
- }
- copy := preFilterState{
- // Constraints are shared because they don't change.
- Constraints: s.Constraints,
- CriticalPaths: make([]*criticalPaths, len(s.CriticalPaths)),
- TpValueToMatchNum: make([]map[string]int, len(s.TpValueToMatchNum)),
- }
- for i, paths := range s.CriticalPaths {
- copy.CriticalPaths[i] = &criticalPaths{paths[0], paths[1]}
- }
- for i, tpMap := range s.TpValueToMatchNum {
- copy.TpValueToMatchNum[i] = maps.Clone(tpMap)
- }
- return &copy
-}
-
-// CAVEAT: the reason that `[2]criticalPath` can work is based on the implementation of current
-// preemption algorithm, in particular the following 2 facts:
-// Fact 1: we only preempt pods on the same node, instead of pods on multiple nodes.
-// Fact 2: each node is evaluated on a separate copy of the preFilterState during its preemption cycle.
-// If we plan to turn to a more complex algorithm like "arbitrary pods on multiple nodes", this
-// structure needs to be revisited.
-// Fields are exported for comparison during testing.
-type criticalPaths [2]struct {
- // TopologyValue denotes the topology value mapping to topology key.
- TopologyValue string
- // MatchNum denotes the number of matching pods.
- MatchNum int
-}
-
-func newCriticalPaths() *criticalPaths {
- return &criticalPaths{{MatchNum: math.MaxInt32}, {MatchNum: math.MaxInt32}}
-}
-
-func (p *criticalPaths) update(tpVal string, num int) {
- // first verify if `tpVal` exists or not
- i := -1
- if tpVal == p[0].TopologyValue {
- i = 0
- } else if tpVal == p[1].TopologyValue {
- i = 1
- }
-
- if i >= 0 {
- // `tpVal` exists
- p[i].MatchNum = num
- if p[0].MatchNum > p[1].MatchNum {
- // swap paths[0] and paths[1]
- p[0], p[1] = p[1], p[0]
- }
- } else {
- // `tpVal` doesn't exist
- if num < p[0].MatchNum {
- // update paths[1] with paths[0]
- p[1] = p[0]
- // update paths[0]
- p[0].TopologyValue, p[0].MatchNum = tpVal, num
- } else if num < p[1].MatchNum {
- // update paths[1]
- p[1].TopologyValue, p[1].MatchNum = tpVal, num
- }
- }
-}
-
-// PreFilter invoked at the prefilter extension point.
-func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
- s, err := pl.calPreFilterState(ctx, pod)
- if err != nil {
- return nil, framework.AsStatus(err)
- } else if s != nil && len(s.Constraints) == 0 {
- return nil, framework.NewStatus(framework.Skip)
- }
-
- cycleState.Write(preFilterStateKey, s)
- return nil, nil
-}
-
-// PreFilterExtensions returns prefilter extensions, pod add and remove.
-func (pl *PodTopologySpread) PreFilterExtensions() framework.PreFilterExtensions {
- return pl
-}
-
-// AddPod from pre-computed data in cycleState.
-func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
- s, err := getPreFilterState(cycleState)
- if err != nil {
- return framework.AsStatus(err)
- }
-
- pl.updateWithPod(s, podInfoToAdd.Pod, podToSchedule, nodeInfo.Node(), 1)
- return nil
-}
-
-// RemovePod from pre-computed data in cycleState.
-func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - s, err := getPreFilterState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - - pl.updateWithPod(s, podInfoToRemove.Pod, podToSchedule, nodeInfo.Node(), -1) - return nil -} - -func (pl *PodTopologySpread) updateWithPod(s *preFilterState, updatedPod, preemptorPod *v1.Pod, node *v1.Node, delta int) { - if s == nil || updatedPod.Namespace != preemptorPod.Namespace || node == nil { - return - } - if !nodeLabelsMatchSpreadConstraints(node.Labels, s.Constraints) { - return - } - - requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(preemptorPod) - if !pl.enableNodeInclusionPolicyInPodTopologySpread { - // spreading is applied to nodes that pass those filters. - // Ignore parsing errors for backwards compatibility. - if match, _ := requiredSchedulingTerm.Match(node); !match { - return - } - } - - podLabelSet := labels.Set(updatedPod.Labels) - for i, constraint := range s.Constraints { - if !constraint.Selector.Matches(podLabelSet) { - continue - } - - if pl.enableNodeInclusionPolicyInPodTopologySpread && - !constraint.matchNodeInclusionPolicies(preemptorPod, node, requiredSchedulingTerm) { - continue - } - - v := node.Labels[constraint.TopologyKey] - s.TpValueToMatchNum[i][v] += delta - s.CriticalPaths[i].update(v, s.TpValueToMatchNum[i][v]) - } -} - -// getPreFilterState fetches a pre-computed preFilterState. -func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) { - c, err := cycleState.Read(preFilterStateKey) - if err != nil { - // preFilterState doesn't exist, likely PreFilter wasn't invoked. - return nil, fmt.Errorf("reading %q from cycleState: %w", preFilterStateKey, err) - } - - s, ok := c.(*preFilterState) - if !ok { - return nil, fmt.Errorf("%+v convert to podtopologyspread.preFilterState error", c) - } - return s, nil -} - -type topologyCount struct { - topologyValue string - constraintID int - count int -} - -// calPreFilterState computes preFilterState describing how pods are spread on topologies. -func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod) (*preFilterState, error) { - constraints, err := pl.getConstraints(pod) - if err != nil { - return nil, fmt.Errorf("get constraints from pod: %w", err) - } - if len(constraints) == 0 { - return &preFilterState{}, nil - } - - allNodes, err := pl.sharedLister.NodeInfos().List() - if err != nil { - return nil, fmt.Errorf("listing NodeInfos: %w", err) - } - - s := preFilterState{ - Constraints: constraints, - CriticalPaths: make([]*criticalPaths, len(constraints)), - TpValueToMatchNum: make([]map[string]int, len(constraints)), - } - for i := 0; i < len(constraints); i++ { - s.TpValueToMatchNum[i] = make(map[string]int, sizeHeuristic(len(allNodes), constraints[i])) - } - - tpCountsByNode := make([][]topologyCount, len(allNodes)) - requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod) - processNode := func(n int) { - nodeInfo := allNodes[n] - node := nodeInfo.Node() - - if !pl.enableNodeInclusionPolicyInPodTopologySpread { - // spreading is applied to nodes that pass those filters. - // Ignore parsing errors for backwards compatibility. - if match, _ := requiredNodeAffinity.Match(node); !match { - return - } - } - - // Ensure current node's labels contains all topologyKeys in 'Constraints'. 
- if !nodeLabelsMatchSpreadConstraints(node.Labels, constraints) { - return - } - - tpCounts := make([]topologyCount, 0, len(constraints)) - for i, c := range constraints { - if pl.enableNodeInclusionPolicyInPodTopologySpread && - !c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) { - continue - } - - value := node.Labels[c.TopologyKey] - count := countPodsMatchSelector(nodeInfo.Pods, c.Selector, pod.Namespace) - tpCounts = append(tpCounts, topologyCount{ - topologyValue: value, - constraintID: i, - count: count, - }) - } - tpCountsByNode[n] = tpCounts - } - pl.parallelizer.Until(ctx, len(allNodes), processNode, pl.Name()) - - for _, tpCounts := range tpCountsByNode { - // tpCounts might not hold all the constraints, so index can't be used here as constraintID. - for _, tpCount := range tpCounts { - s.TpValueToMatchNum[tpCount.constraintID][tpCount.topologyValue] += tpCount.count - } - } - - // calculate min match for each constraint and topology value - for i := 0; i < len(constraints); i++ { - s.CriticalPaths[i] = newCriticalPaths() - - for value, num := range s.TpValueToMatchNum[i] { - s.CriticalPaths[i].update(value, num) - } - } - - return &s, nil -} - -// Filter invoked at the filter extension point. -func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - node := nodeInfo.Node() - - s, err := getPreFilterState(cycleState) - if err != nil { - return framework.AsStatus(err) - } - - // However, "empty" preFilterState is legit which tolerates every toSchedule Pod. - if len(s.Constraints) == 0 { - return nil - } - - logger := klog.FromContext(ctx) - podLabelSet := labels.Set(pod.Labels) - for i, c := range s.Constraints { - tpKey := c.TopologyKey - tpVal, ok := node.Labels[tpKey] - if !ok { - logger.V(5).Info("Node doesn't have required topology label for spread constraint", "node", klog.KObj(node), "topologyKey", tpKey) - return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNodeLabelNotMatch) - } - - // judging criteria: - // 'existing matching num' + 'if self-match (1 or 0)' - 'global minimum' <= 'maxSkew' - minMatchNum, err := s.minMatchNum(i, c.MinDomains) - if err != nil { - logger.Error(err, "Internal error occurred while retrieving value precalculated in PreFilter", "topologyKey", tpKey, "paths", s.CriticalPaths[i]) - continue - } - - selfMatchNum := 0 - if c.Selector.Matches(podLabelSet) { - selfMatchNum = 1 - } - - matchNum := s.TpValueToMatchNum[i][tpVal] - skew := matchNum + selfMatchNum - minMatchNum - if skew > int(c.MaxSkew) { - logger.V(5).Info("Node failed spreadConstraint: matchNum + selfMatchNum - minMatchNum > maxSkew", "node", klog.KObj(node), "topologyKey", tpKey, "matchNum", matchNum, "selfMatchNum", selfMatchNum, "minMatchNum", minMatchNum, "maxSkew", c.MaxSkew) - return framework.NewStatus(framework.Unschedulable, ErrReasonConstraintsNotMatch) - } - } - - return nil -} - -func sizeHeuristic(nodes int, constraint topologySpreadConstraint) int { - if constraint.TopologyKey == v1.LabelHostname { - return nodes - } - return 0 -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go deleted file mode 100644 index 4295755ca8a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go +++ /dev/null @@ -1,351 +0,0 @@ -/* -Copyright 2019 The Kubernetes 
Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package podtopologyspread - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - appslisters "k8s.io/client-go/listers/apps/v1" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -const ( - // ErrReasonConstraintsNotMatch is used for PodTopologySpread filter error. - ErrReasonConstraintsNotMatch = "node(s) didn't match pod topology spread constraints" - // ErrReasonNodeLabelNotMatch is used when the node doesn't hold the required label. - ErrReasonNodeLabelNotMatch = ErrReasonConstraintsNotMatch + " (missing required label)" -) - -var systemDefaultConstraints = []v1.TopologySpreadConstraint{ - { - TopologyKey: v1.LabelHostname, - WhenUnsatisfiable: v1.ScheduleAnyway, - MaxSkew: 3, - }, - { - TopologyKey: v1.LabelTopologyZone, - WhenUnsatisfiable: v1.ScheduleAnyway, - MaxSkew: 5, - }, -} - -// PodTopologySpread is a plugin that ensures pod's topologySpreadConstraints is satisfied. -type PodTopologySpread struct { - systemDefaulted bool - parallelizer parallelize.Parallelizer - defaultConstraints []v1.TopologySpreadConstraint - sharedLister framework.SharedLister - services corelisters.ServiceLister - replicationCtrls corelisters.ReplicationControllerLister - replicaSets appslisters.ReplicaSetLister - statefulSets appslisters.StatefulSetLister - enableNodeInclusionPolicyInPodTopologySpread bool - enableMatchLabelKeysInPodTopologySpread bool - enableSchedulingQueueHint bool -} - -var _ framework.PreFilterPlugin = &PodTopologySpread{} -var _ framework.FilterPlugin = &PodTopologySpread{} -var _ framework.PreScorePlugin = &PodTopologySpread{} -var _ framework.ScorePlugin = &PodTopologySpread{} -var _ framework.EnqueueExtensions = &PodTopologySpread{} - -// Name is the name of the plugin used in the plugin registry and configurations. -const Name = names.PodTopologySpread - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *PodTopologySpread) Name() string { - return Name -} - -// New initializes a new plugin and returns it. 
-func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { - if h.SnapshotSharedLister() == nil { - return nil, fmt.Errorf("SnapshotSharedlister is nil") - } - args, err := getArgs(plArgs) - if err != nil { - return nil, err - } - if err := validation.ValidatePodTopologySpreadArgs(nil, &args); err != nil { - return nil, err - } - pl := &PodTopologySpread{ - parallelizer: h.Parallelizer(), - sharedLister: h.SnapshotSharedLister(), - defaultConstraints: args.DefaultConstraints, - enableNodeInclusionPolicyInPodTopologySpread: fts.EnableNodeInclusionPolicyInPodTopologySpread, - enableMatchLabelKeysInPodTopologySpread: fts.EnableMatchLabelKeysInPodTopologySpread, - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - } - if args.DefaultingType == config.SystemDefaulting { - pl.defaultConstraints = systemDefaultConstraints - pl.systemDefaulted = true - } - if len(pl.defaultConstraints) != 0 { - if h.SharedInformerFactory() == nil { - return nil, fmt.Errorf("SharedInformerFactory is nil") - } - pl.setListers(h.SharedInformerFactory()) - } - return pl, nil -} - -func getArgs(obj runtime.Object) (config.PodTopologySpreadArgs, error) { - ptr, ok := obj.(*config.PodTopologySpreadArgs) - if !ok { - return config.PodTopologySpreadArgs{}, fmt.Errorf("want args to be of type PodTopologySpreadArgs, got %T", obj) - } - return *ptr, nil -} - -func (pl *PodTopologySpread) setListers(factory informers.SharedInformerFactory) { - pl.services = factory.Core().V1().Services().Lister() - pl.replicationCtrls = factory.Core().V1().ReplicationControllers().Lister() - pl.replicaSets = factory.Apps().V1().ReplicaSets().Lister() - pl.statefulSets = factory.Apps().V1().StatefulSets().Lister() -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *PodTopologySpread) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - podActionType := framework.Add | framework.UpdatePodLabel | framework.Delete - if pl.enableSchedulingQueueHint { - // When the QueueingHint feature is enabled, the scheduling queue uses Pod/Update Queueing Hint - // to determine whether a Pod's update makes the Pod schedulable or not. - // https://github.com/kubernetes/kubernetes/pull/122234 - // (If not, the scheduling queue always retries the unschedulable Pods when they're updated.) - // - // The Pod rejected by this plugin can be schedulable when the Pod has a spread constraint with NodeTaintsPolicy:Honor - // and has got a new toleration. - // So, we add UpdatePodToleration here only when QHint is enabled. - podActionType = framework.Add | framework.UpdatePodLabel | framework.UpdatePodToleration | framework.Delete - } - - return []framework.ClusterEventWithHint{ - // All ActionType includes the following events: - // - Add. An unschedulable Pod may fail due to violating topology spread constraints, - // adding an assigned Pod may make it schedulable. - // - UpdatePodLabel. Updating on an existing Pod's labels (e.g., removal) may make - // an unschedulable Pod schedulable. - // - Delete. An unschedulable Pod may fail due to violating an existing Pod's topology spread constraints, - // deleting an existing Pod may make it schedulable. 
- {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: pl.isSchedulableAfterPodChange}, - // Node add|delete|update maybe lead an topology key changed, - // and make these pod in scheduling schedulable or unschedulable. - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.Delete | framework.UpdateNodeLabel | framework.UpdateNodeTaint}, QueueingHintFn: pl.isSchedulableAfterNodeChange}, - }, nil -} - -// involvedInTopologySpreading returns true if the incomingPod is involved in the topology spreading of podWithSpreading. -func involvedInTopologySpreading(incomingPod, podWithSpreading *v1.Pod) bool { - return incomingPod.UID == podWithSpreading.UID || - (incomingPod.Spec.NodeName != "" && incomingPod.Namespace == podWithSpreading.Namespace) -} - -// hasConstraintWithNodeTaintsPolicyHonor returns true if any constraint has `NodeTaintsPolicy: Honor`. -func hasConstraintWithNodeTaintsPolicyHonor(constraints []topologySpreadConstraint) bool { - for _, c := range constraints { - if c.NodeTaintsPolicy == v1.NodeInclusionPolicyHonor { - return true - } - } - return false -} - -func (pl *PodTopologySpread) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalPod, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - if (modifiedPod != nil && !involvedInTopologySpreading(modifiedPod, pod)) || (originalPod != nil && !involvedInTopologySpreading(originalPod, pod)) { - logger.V(5).Info("the added/updated/deleted pod is unscheduled or has different namespace with target pod, so it doesn't make the target pod schedulable", - "pod", klog.KObj(pod), "originalPod", klog.KObj(originalPod)) - return framework.QueueSkip, nil - } - - constraints, err := pl.getConstraints(pod) - if err != nil { - return framework.Queue, err - } - - // Pod is modified. Return Queue when the label(s) matching topologySpread's selector is added, changed, or deleted. - if modifiedPod != nil && originalPod != nil { - if pod.UID == modifiedPod.UID && !equality.Semantic.DeepEqual(modifiedPod.Spec.Tolerations, originalPod.Spec.Tolerations) && hasConstraintWithNodeTaintsPolicyHonor(constraints) { - // If any constraint has `NodeTaintsPolicy: Honor`, we can return Queue when the target Pod has got a new toleration. - logger.V(5).Info("the unschedulable pod has got a new toleration, which could make it schedulable", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.Queue, nil - } - - if equality.Semantic.DeepEqual(modifiedPod.Labels, originalPod.Labels) { - logger.V(5).Info("the pod's update doesn't include the label update, which doesn't make the target pod schedulable", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - for _, c := range constraints { - if c.Selector.Matches(labels.Set(originalPod.Labels)) != c.Selector.Matches(labels.Set(modifiedPod.Labels)) { - // This modification makes this Pod match(or not match) with this constraint. - // Maybe now the scheduling result of topology spread gets changed by this change. 
- logger.V(5).Info("a scheduled pod's label was updated and it makes the updated pod match or unmatch the pod's topology spread constraints", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.Queue, nil - } - } - // This modification of labels doesn't change whether this Pod would match selector or not in any constraints. - logger.V(5).Info("a scheduled pod's label was updated, but it's a change unrelated to the pod's topology spread constraints", - "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - - // Pod is added. Return Queue when the added Pod has a label that matches with topologySpread's selector. - if modifiedPod != nil { - if podLabelsMatchSpreadConstraints(constraints, modifiedPod.Labels) { - logger.V(5).Info("a scheduled pod was created and it matches with the pod's topology spread constraints", - "pod", klog.KObj(pod), "createdPod", klog.KObj(modifiedPod)) - return framework.Queue, nil - } - logger.V(5).Info("a scheduled pod was created, but it doesn't matches with the pod's topology spread constraints", - "pod", klog.KObj(pod), "createdPod", klog.KObj(modifiedPod)) - return framework.QueueSkip, nil - } - - // Pod is deleted. Return Queue when the deleted Pod has a label that matches with topologySpread's selector. - if podLabelsMatchSpreadConstraints(constraints, originalPod.Labels) { - logger.V(5).Info("a scheduled pod which matches with the pod's topology spread constraints was deleted, and the pod may be schedulable now", - "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod)) - return framework.Queue, nil - } - logger.V(5).Info("a scheduled pod was deleted, but it's unrelated to the pod's topology spread constraints", - "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod)) - - return framework.QueueSkip, nil -} - -// getConstraints extracts topologySpreadConstraint(s) from the Pod spec. -// If the Pod doesn't have any topologySpreadConstraint, it returns default constraints. -func (pl *PodTopologySpread) getConstraints(pod *v1.Pod) ([]topologySpreadConstraint, error) { - var constraints []topologySpreadConstraint - var err error - if len(pod.Spec.TopologySpreadConstraints) > 0 { - // We have feature gating in APIServer to strip the spec - // so don't need to re-check feature gate, just check length of Constraints. - constraints, err = pl.filterTopologySpreadConstraints( - pod.Spec.TopologySpreadConstraints, - pod.Labels, - v1.DoNotSchedule, - ) - if err != nil { - return nil, fmt.Errorf("obtaining pod's hard topology spread constraints: %w", err) - } - } else { - constraints, err = pl.buildDefaultConstraints(pod, v1.DoNotSchedule) - if err != nil { - return nil, fmt.Errorf("setting default hard topology spread constraints: %w", err) - } - } - return constraints, nil -} - -// isSchedulableAfterNodeChange returns Queue when node has topologyKey in its labels, else return QueueSkip. 
-func (pl *PodTopologySpread) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - constraints, err := pl.getConstraints(pod) - if err != nil { - return framework.Queue, err - } - - var originalNodeMatching, modifiedNodeMatching bool - if originalNode != nil { - originalNodeMatching = nodeLabelsMatchSpreadConstraints(originalNode.Labels, constraints) - } - if modifiedNode != nil { - modifiedNodeMatching = nodeLabelsMatchSpreadConstraints(modifiedNode.Labels, constraints) - } - - // We return Queue in the following cases: - // 1. Node/UpdateNodeLabel: - // - The original node matched the pod's topology spread constraints, but the modified node does not. - // - The modified node matches the pod's topology spread constraints, but the original node does not. - // - The modified node matches the pod's topology spread constraints, and the original node and the modified node have different label values for any topologyKey. - // 2. Node/UpdateNodeTaint: - // - The modified node match the pod's topology spread constraints, and the original node and the modified node have different taints. - // 3. Node/Add: The created node matches the pod's topology spread constraints. - // 4. Node/Delete: The original node matched the pod's topology spread constraints. - if originalNode != nil && modifiedNode != nil { - if originalNodeMatching != modifiedNodeMatching { - logger.V(5).Info("the node is updated and now pod topology spread constraints has changed, and the pod may be schedulable now", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode), "originalMatching", originalNodeMatching, "newMatching", modifiedNodeMatching) - return framework.Queue, nil - } - if modifiedNodeMatching && (checkTopologyKeyLabelsChanged(originalNode.Labels, modifiedNode.Labels, constraints) || !equality.Semantic.DeepEqual(originalNode.Spec.Taints, modifiedNode.Spec.Taints)) { - logger.V(5).Info("the node is updated and now has different taints or labels, and the pod may be schedulable now", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - return framework.QueueSkip, nil - } - - if modifiedNode != nil { - if !modifiedNodeMatching { - logger.V(5).Info("the created node doesn't match pod topology spread constraints", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil - } - logger.V(5).Info("the created node matches topology spread constraints, and the pod may be schedulable now", - "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - - if !originalNodeMatching { - logger.V(5).Info("the deleted node doesn't match pod topology spread constraints", "pod", klog.KObj(pod), "node", klog.KObj(originalNode)) - return framework.QueueSkip, nil - } - logger.V(5).Info("the deleted node matches topology spread constraints, and the pod may be schedulable now", - "pod", klog.KObj(pod), "node", klog.KObj(originalNode)) - return framework.Queue, nil -} - -// checkTopologyKeyLabelsChanged checks if any of the labels specified as topologyKey in the constraints have changed. 
-func checkTopologyKeyLabelsChanged(originalLabels, modifiedLabels map[string]string, constraints []topologySpreadConstraint) bool { - for _, constraint := range constraints { - topologyKey := constraint.TopologyKey - if originalLabels[topologyKey] != modifiedLabels[topologyKey] { - return true - } - } - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go deleted file mode 100644 index ed8a9542f44..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go +++ /dev/null @@ -1,303 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package podtopologyspread - -import ( - "context" - "fmt" - "math" - "sync/atomic" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -const preScoreStateKey = "PreScore" + Name -const invalidScore = -1 - -// preScoreState computed at PreScore and used at Score. -// Fields are exported for comparison during testing. -type preScoreState struct { - Constraints []topologySpreadConstraint - // IgnoredNodes is a set of node names which miss some Constraints[*].topologyKey. - IgnoredNodes sets.Set[string] - // TopologyValueToPodCounts is a slice indexed by constraint index. - // Each entry is keyed with topology value, and valued with the number of matching pods. - TopologyValueToPodCounts []map[string]*int64 - // TopologyNormalizingWeight is the weight we give to the counts per topology. - // This allows the pod counts of smaller topologies to not be watered down by - // bigger ones. - TopologyNormalizingWeight []float64 -} - -// Clone implements the mandatory Clone interface. We don't really copy the data since -// there is no need for that. -func (s *preScoreState) Clone() framework.StateData { - return s -} - -// initPreScoreState iterates "filteredNodes" to filter out the nodes which -// don't have required topologyKey(s), and initialize: -// 1) s.TopologyPairToPodCounts: keyed with both eligible topology pair and node names. -// 2) s.IgnoredNodes: the set of nodes that shouldn't be scored. -// 3) s.TopologyNormalizingWeight: The weight to be given to each constraint based on the number of values in a topology. 
-func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, filteredNodes []*framework.NodeInfo, requireAllTopologies bool) error { - var err error - if len(pod.Spec.TopologySpreadConstraints) > 0 { - s.Constraints, err = pl.filterTopologySpreadConstraints( - pod.Spec.TopologySpreadConstraints, - pod.Labels, - v1.ScheduleAnyway, - ) - if err != nil { - return fmt.Errorf("obtaining pod's soft topology spread constraints: %w", err) - } - } else { - s.Constraints, err = pl.buildDefaultConstraints(pod, v1.ScheduleAnyway) - if err != nil { - return fmt.Errorf("setting default soft topology spread constraints: %w", err) - } - } - if len(s.Constraints) == 0 { - return nil - } - s.TopologyValueToPodCounts = make([]map[string]*int64, len(s.Constraints)) - for i := 0; i < len(s.Constraints); i++ { - s.TopologyValueToPodCounts[i] = make(map[string]*int64) - } - topoSize := make([]int, len(s.Constraints)) - for _, node := range filteredNodes { - if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Node().Labels, s.Constraints) { - // Nodes which don't have all required topologyKeys present are ignored - // when scoring later. - s.IgnoredNodes.Insert(node.Node().Name) - continue - } - for i, constraint := range s.Constraints { - // per-node counts are calculated during Score. - if constraint.TopologyKey == v1.LabelHostname { - continue - } - value := node.Node().Labels[constraint.TopologyKey] - if s.TopologyValueToPodCounts[i][value] == nil { - s.TopologyValueToPodCounts[i][value] = new(int64) - topoSize[i]++ - } - } - } - - s.TopologyNormalizingWeight = make([]float64, len(s.Constraints)) - for i, c := range s.Constraints { - sz := topoSize[i] - if c.TopologyKey == v1.LabelHostname { - sz = len(filteredNodes) - len(s.IgnoredNodes) - } - s.TopologyNormalizingWeight[i] = topologyNormalizingWeight(sz) - } - return nil -} - -// PreScore builds and writes cycle state used by Score and NormalizeScore. -func (pl *PodTopologySpread) PreScore( - ctx context.Context, - cycleState *framework.CycleState, - pod *v1.Pod, - filteredNodes []*framework.NodeInfo, -) *framework.Status { - allNodes, err := pl.sharedLister.NodeInfos().List() - if err != nil { - return framework.AsStatus(fmt.Errorf("getting all nodes: %w", err)) - } - - if len(allNodes) == 0 { - // No need to score. - return framework.NewStatus(framework.Skip) - } - - state := &preScoreState{ - IgnoredNodes: sets.New[string](), - } - // Only require that nodes have all the topology labels if using - // non-system-default spreading rules. This allows nodes that don't have a - // zone label to still have hostname spreading. - requireAllTopologies := len(pod.Spec.TopologySpreadConstraints) > 0 || !pl.systemDefaulted - err = pl.initPreScoreState(state, pod, filteredNodes, requireAllTopologies) - if err != nil { - return framework.AsStatus(fmt.Errorf("calculating preScoreState: %w", err)) - } - - // return Skip if incoming pod doesn't have soft topology spread Constraints. - if len(state.Constraints) == 0 { - return framework.NewStatus(framework.Skip) - } - - // Ignore parsing errors for backwards compatibility. 
-	requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
-	processAllNode := func(n int) {
-		nodeInfo := allNodes[n]
-		node := nodeInfo.Node()
-
-		if !pl.enableNodeInclusionPolicyInPodTopologySpread {
-			// `node` should satisfy incoming pod's NodeSelector/NodeAffinity
-			if match, _ := requiredNodeAffinity.Match(node); !match {
-				return
-			}
-		}
-
-		// All topologyKeys need to be present in `node`
-		if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Labels, state.Constraints) {
-			return
-		}
-
-		for i, c := range state.Constraints {
-			if pl.enableNodeInclusionPolicyInPodTopologySpread &&
-				!c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) {
-				continue
-			}
-
-			value := node.Labels[c.TopologyKey]
-			// If current topology pair is not associated with any candidate node,
-			// continue to avoid unnecessary calculation.
-			// Per-node counts are also skipped, as they are done during Score.
-			tpCount := state.TopologyValueToPodCounts[i][value]
-			if tpCount == nil {
-				continue
-			}
-			count := countPodsMatchSelector(nodeInfo.Pods, c.Selector, pod.Namespace)
-			atomic.AddInt64(tpCount, int64(count))
-		}
-	}
-	pl.parallelizer.Until(ctx, len(allNodes), processAllNode, pl.Name())
-
-	cycleState.Write(preScoreStateKey, state)
-	return nil
-}
-
-// Score invoked at the Score extension point.
-// The "score" returned in this function is the matching number of pods on the `nodeName`,
-// it is normalized later.
-func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
-	node := nodeInfo.Node()
-	s, err := getPreScoreState(cycleState)
-	if err != nil {
-		return 0, framework.AsStatus(err)
-	}
-
-	// Return if the node is not qualified.
-	if s.IgnoredNodes.Has(node.Name) {
-		return 0, nil
-	}
-
-	// For each present <pair>, current node gets a credit of <matchSum>.
-	// And we sum up <matchSum> and return it as this node's score.
-	var score float64
-	for i, c := range s.Constraints {
-		if tpVal, ok := node.Labels[c.TopologyKey]; ok {
-			var cnt int64
-			if c.TopologyKey == v1.LabelHostname {
-				cnt = int64(countPodsMatchSelector(nodeInfo.Pods, c.Selector, pod.Namespace))
-			} else {
-				cnt = *s.TopologyValueToPodCounts[i][tpVal]
-			}
-			score += scoreForCount(cnt, c.MaxSkew, s.TopologyNormalizingWeight[i])
-		}
-	}
-	return int64(math.Round(score)), nil
-}
-
-// NormalizeScore invoked after scoring all nodes.
-func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
-	s, err := getPreScoreState(cycleState)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	if s == nil {
-		return nil
-	}
-
-	// Calculate <minScore> and <maxScore>
-	var minScore int64 = math.MaxInt64
-	var maxScore int64
-	for i, score := range scores {
-		// it's mandatory to check if <score.Name> is present in m.IgnoredNodes
-		if s.IgnoredNodes.Has(score.Name) {
-			scores[i].Score = invalidScore
-			continue
-		}
-		if score.Score < minScore {
-			minScore = score.Score
-		}
-		if score.Score > maxScore {
-			maxScore = score.Score
-		}
-	}
-
-	for i := range scores {
-		if scores[i].Score == invalidScore {
-			scores[i].Score = 0
-			continue
-		}
-		if maxScore == 0 {
-			scores[i].Score = framework.MaxNodeScore
-			continue
-		}
-		s := scores[i].Score
-		scores[i].Score = framework.MaxNodeScore * (maxScore + minScore - s) / maxScore
-	}
-	return nil
-}
-
-// ScoreExtensions of the Score plugin.
-func (pl *PodTopologySpread) ScoreExtensions() framework.ScoreExtensions {
-	return pl
-}
-
-func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) {
-	c, err := cycleState.Read(preScoreStateKey)
-	if err != nil {
-		return nil, fmt.Errorf("error reading %q from cycleState: %w", preScoreStateKey, err)
-	}
-
-	s, ok := c.(*preScoreState)
-	if !ok {
-		return nil, fmt.Errorf("%+v convert to podtopologyspread.preScoreState error", c)
-	}
-	return s, nil
-}
-
-// topologyNormalizingWeight calculates the weight for the topology, based on
-// the number of values that exist for a topology.
-// Since <size> is at least 1 (all nodes that passed the Filters are in the
-// same topology), and k8s supports 5k nodes, the result is in the interval
-// <1.09, 8.52>.
-//
-// Note: <size> could also be zero when no nodes have the required topologies,
-// however we don't care about topology weight in this case as we return a 0
-// score for all nodes.
-func topologyNormalizingWeight(size int) float64 {
-	return math.Log(float64(size + 2))
-}
-
-// scoreForCount calculates the score based on number of matching pods in a
-// topology domain, the constraint's maxSkew and the topology weight.
-// `maxSkew-1` is added to the score so that differences between topology
-// domains get watered down, controlling the tolerance of the score to skews.
-func scoreForCount(cnt int64, maxSkew int32, tpWeight float64) float64 {
-	return float64(cnt)*tpWeight + float64(maxSkew-1)
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort/priority_sort.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort/priority_sort.go
deleted file mode 100644
index b705cb73831..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort/priority_sort.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package queuesort
-
-import (
-	"context"
-
-	"k8s.io/apimachinery/pkg/runtime"
-	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-)
-
-// Name is the name of the plugin used in the plugin registry and configurations.
-const Name = names.PrioritySort
-
-// PrioritySort is a plugin that implements Priority based sorting.
-type PrioritySort struct{}
-
-var _ framework.QueueSortPlugin = &PrioritySort{}
-
-// Name returns name of the plugin.
-func (pl *PrioritySort) Name() string {
-	return Name
-}
-
-// Less is the function used by the activeQ heap algorithm to sort pods.
-// It sorts pods based on their priority. When priorities are equal, it uses
-// PodQueueInfo.timestamp.
-func (pl *PrioritySort) Less(pInfo1, pInfo2 *framework.QueuedPodInfo) bool {
-	p1 := corev1helpers.PodPriority(pInfo1.Pod)
-	p2 := corev1helpers.PodPriority(pInfo2.Pod)
-	return (p1 > p2) || (p1 == p2 && pInfo1.Timestamp.Before(pInfo2.Timestamp))
-}
-
-// New initializes a new plugin and returns it.
-func New(_ context.Context, _ runtime.Object, handle framework.Handle) (framework.Plugin, error) { - return &PrioritySort{}, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/registry.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/registry.go deleted file mode 100644 index 3e3f27092b8..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/registry.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugins - -import ( - "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources" - plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/schedulinggates" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone" - "k8s.io/kubernetes/pkg/scheduler/framework/runtime" -) - -// NewInTreeRegistry builds the registry with all the in-tree plugins. -// A scheduler that runs out of tree plugins can register additional plugins -// through the WithFrameworkOutOfTreeRegistry option. 
-func NewInTreeRegistry() runtime.Registry { - fts := plfeature.Features{ - EnableDRAPrioritizedList: feature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList), - EnableDRAAdminAccess: feature.DefaultFeatureGate.Enabled(features.DRAAdminAccess), - EnableDRADeviceTaints: feature.DefaultFeatureGate.Enabled(features.DRADeviceTaints), - EnableDynamicResourceAllocation: feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation), - EnableVolumeAttributesClass: feature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass), - EnableCSIMigrationPortworx: feature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx), - EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread), - EnableMatchLabelKeysInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpread), - EnableInPlacePodVerticalScaling: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), - EnableSidecarContainers: feature.DefaultFeatureGate.Enabled(features.SidecarContainers), - EnableSchedulingQueueHint: feature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints), - EnableAsyncPreemption: feature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption), - EnablePodLevelResources: feature.DefaultFeatureGate.Enabled(features.PodLevelResources), - EnablePartitionableDevices: feature.DefaultFeatureGate.Enabled(features.DRAPartitionableDevices), - EnableStorageCapacityScoring: feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring), - } - - registry := runtime.Registry{ - dynamicresources.Name: runtime.FactoryAdapter(fts, dynamicresources.New), - imagelocality.Name: imagelocality.New, - tainttoleration.Name: runtime.FactoryAdapter(fts, tainttoleration.New), - nodename.Name: runtime.FactoryAdapter(fts, nodename.New), - nodeports.Name: runtime.FactoryAdapter(fts, nodeports.New), - nodeaffinity.Name: runtime.FactoryAdapter(fts, nodeaffinity.New), - podtopologyspread.Name: runtime.FactoryAdapter(fts, podtopologyspread.New), - nodeunschedulable.Name: runtime.FactoryAdapter(fts, nodeunschedulable.New), - noderesources.Name: runtime.FactoryAdapter(fts, noderesources.NewFit), - noderesources.BalancedAllocationName: runtime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), - volumebinding.Name: runtime.FactoryAdapter(fts, volumebinding.New), - volumerestrictions.Name: runtime.FactoryAdapter(fts, volumerestrictions.New), - volumezone.Name: runtime.FactoryAdapter(fts, volumezone.New), - nodevolumelimits.CSIName: runtime.FactoryAdapter(fts, nodevolumelimits.NewCSI), - interpodaffinity.Name: runtime.FactoryAdapter(fts, interpodaffinity.New), - queuesort.Name: queuesort.New, - defaultbinder.Name: defaultbinder.New, - defaultpreemption.Name: runtime.FactoryAdapter(fts, defaultpreemption.New), - schedulinggates.Name: runtime.FactoryAdapter(fts, schedulinggates.New), - } - - return registry -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates.go deleted file mode 100644 index bccd4bd8b1d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package schedulinggates - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// Name of the plugin used in the plugin registry and configurations. -const Name = names.SchedulingGates - -// SchedulingGates checks if a Pod carries .spec.schedulingGates. -type SchedulingGates struct { - enableSchedulingQueueHint bool -} - -var _ framework.PreEnqueuePlugin = &SchedulingGates{} -var _ framework.EnqueueExtensions = &SchedulingGates{} - -func (pl *SchedulingGates) Name() string { - return Name -} - -func (pl *SchedulingGates) PreEnqueue(ctx context.Context, p *v1.Pod) *framework.Status { - if len(p.Spec.SchedulingGates) == 0 { - return nil - } - gates := make([]string, 0, len(p.Spec.SchedulingGates)) - for _, gate := range p.Spec.SchedulingGates { - gates = append(gates, gate.Name) - } - return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("waiting for scheduling gates: %v", gates)) -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *SchedulingGates) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - if !pl.enableSchedulingQueueHint { - return nil, nil - } - // When the QueueingHint feature is enabled, - // the scheduling queue uses Pod/Update Queueing Hint - // to determine whether a Pod's update makes the Pod schedulable or not. - // https://github.com/kubernetes/kubernetes/pull/122234 - return []framework.ClusterEventWithHint{ - // Pods can be more schedulable once it's gates are removed - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.UpdatePodSchedulingGatesEliminated}, QueueingHintFn: pl.isSchedulableAfterUpdatePodSchedulingGatesEliminated}, - }, nil -} - -// New initializes a new plugin and returns it. -func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) { - return &SchedulingGates{ - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - }, nil -} - -func (pl *SchedulingGates) isSchedulableAfterUpdatePodSchedulingGatesEliminated(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - _, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - if modifiedPod.UID != pod.UID { - // If the update event is not for targetPod, it wouldn't make targetPod schedulable. 
- return framework.QueueSkip, nil - } - - return framework.Queue, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go deleted file mode 100644 index 3eb8f5e1a2f..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go +++ /dev/null @@ -1,229 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tainttoleration - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - v1helper "k8s.io/component-helpers/scheduling/corev1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// TaintToleration is a plugin that checks if a pod tolerates a node's taints. -type TaintToleration struct { - handle framework.Handle - enableSchedulingQueueHint bool -} - -var _ framework.FilterPlugin = &TaintToleration{} -var _ framework.PreScorePlugin = &TaintToleration{} -var _ framework.ScorePlugin = &TaintToleration{} -var _ framework.EnqueueExtensions = &TaintToleration{} - -const ( - // Name is the name of the plugin used in the plugin registry and configurations. - Name = names.TaintToleration - // preScoreStateKey is the key in CycleState to TaintToleration pre-computed data for Scoring. - preScoreStateKey = "PreScore" + Name - // ErrReasonNotMatch is the Filter reason status when not matching. - ErrReasonNotMatch = "node(s) had taints that the pod didn't tolerate" -) - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *TaintToleration) Name() string { - return Name -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *TaintToleration) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - if pl.enableSchedulingQueueHint { - return []framework.ClusterEventWithHint{ - // When the QueueingHint feature is enabled, preCheck is eliminated and we don't need additional UpdateNodeLabel. - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeTaint}, QueueingHintFn: pl.isSchedulableAfterNodeChange}, - // When the QueueingHint feature is enabled, - // the scheduling queue uses Pod/Update Queueing Hint - // to determine whether a Pod's update makes the Pod schedulable or not. 
- // https://github.com/kubernetes/kubernetes/pull/122234 - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.UpdatePodToleration}, QueueingHintFn: pl.isSchedulableAfterPodTolerationChange}, - }, nil - } - - return []framework.ClusterEventWithHint{ - // A note about UpdateNodeLabel event: - // Ideally, it's supposed to register only Add | UpdateNodeTaint because UpdateNodeLabel will never change the result from this plugin. - // But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add. - // See: https://github.com/kubernetes/kubernetes/issues/109437 - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeTaint | framework.UpdateNodeLabel}, QueueingHintFn: pl.isSchedulableAfterNodeChange}, - // No need to register the Pod event; the update to the unschedulable Pods already triggers the scheduling retry when QHint is disabled. - }, nil -} - -// isSchedulableAfterNodeChange is invoked for all node events reported by -// an informer. It checks whether that change made a previously unschedulable -// pod schedulable. -func (pl *TaintToleration) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - wasUntolerated := true - if originalNode != nil { - _, wasUntolerated = v1helper.FindMatchingUntoleratedTaint(originalNode.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc()) - } - - _, isUntolerated := v1helper.FindMatchingUntoleratedTaint(modifiedNode.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc()) - - if wasUntolerated && !isUntolerated { - logger.V(5).Info("node was created or updated, and this may make the Pod rejected by TaintToleration plugin in the previous scheduling cycle schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.Queue, nil - } - - logger.V(5).Info("node was created or updated, but it doesn't change the TaintToleration plugin's decision", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) - return framework.QueueSkip, nil -} - -// Filter invoked at the filter extension point. -func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - node := nodeInfo.Node() - - taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc()) - if !isUntolerated { - return nil - } - - errReason := fmt.Sprintf("node(s) had untolerated taint {%s: %s}", taint.Key, taint.Value) - return framework.NewStatus(framework.UnschedulableAndUnresolvable, errReason) -} - -// preScoreState computed at PreScore and used at Score. -type preScoreState struct { - tolerationsPreferNoSchedule []v1.Toleration -} - -// Clone implements the mandatory Clone interface. We don't really copy the data since -// there is no need for that. -func (s *preScoreState) Clone() framework.StateData { - return s -} - -// getAllTolerationEffectPreferNoSchedule gets the list of all Tolerations with Effect PreferNoSchedule or with no effect. 
-func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) { - for _, toleration := range tolerations { - // Empty effect means all effects which includes PreferNoSchedule, so we need to collect it as well. - if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule { - tolerationList = append(tolerationList, toleration) - } - } - return -} - -// PreScore builds and writes cycle state used by Score and NormalizeScore. -func (pl *TaintToleration) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { - tolerationsPreferNoSchedule := getAllTolerationPreferNoSchedule(pod.Spec.Tolerations) - state := &preScoreState{ - tolerationsPreferNoSchedule: tolerationsPreferNoSchedule, - } - cycleState.Write(preScoreStateKey, state) - return nil -} - -func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { - c, err := cycleState.Read(preScoreStateKey) - if err != nil { - return nil, fmt.Errorf("failed to read %q from cycleState: %w", preScoreStateKey, err) - } - - s, ok := c.(*preScoreState) - if !ok { - return nil, fmt.Errorf("%+v convert to tainttoleration.preScoreState error", c) - } - return s, nil -} - -// CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule -func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) { - for _, taint := range taints { - // check only on taints that have effect PreferNoSchedule - if taint.Effect != v1.TaintEffectPreferNoSchedule { - continue - } - - if !v1helper.TolerationsTolerateTaint(tolerations, &taint) { - intolerableTaints++ - } - } - return -} - -// Score invoked at the Score extension point. -func (pl *TaintToleration) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - node := nodeInfo.Node() - - s, err := getPreScoreState(state) - if err != nil { - return 0, framework.AsStatus(err) - } - - score := int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, s.tolerationsPreferNoSchedule)) - return score, nil -} - -// NormalizeScore invoked after scoring all nodes. -func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { - return helper.DefaultNormalizeScore(framework.MaxNodeScore, true, scores) -} - -// ScoreExtensions of the Score plugin. -func (pl *TaintToleration) ScoreExtensions() framework.ScoreExtensions { - return pl -} - -// New initializes a new plugin and returns it. -func New(_ context.Context, _ runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { - return &TaintToleration{ - handle: h, - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - }, nil -} - -// isSchedulableAfterPodTolerationChange is invoked whenever a pod's toleration changed. -func (pl *TaintToleration) isSchedulableAfterPodTolerationChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - _, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj) - if err != nil { - return framework.Queue, err - } - - if pod.UID == modifiedPod.UID { - // The updated Pod is the unschedulable Pod. 
- logger.V(5).Info("a new toleration is added for the unschedulable Pod, and it may make it schedulable", "pod", klog.KObj(modifiedPod)) - return framework.Queue, nil - } - - logger.V(5).Info("a new toleration is added for a Pod, but it's an unrelated Pod and wouldn't change the TaintToleration plugin's decision", "pod", klog.KObj(modifiedPod)) - - return framework.QueueSkip, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/OWNERS deleted file mode 100644 index be3245e85e1..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - sig-storage-approvers - - cofyc -reviewers: - - sig-storage-reviewers - - cofyc -labels: - - sig/storage diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go deleted file mode 100644 index e12fb08a0d4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package volumebinding - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - storagehelpers "k8s.io/component-helpers/storage/volume" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/util/assumecache" -) - -// PVAssumeCache is a AssumeCache for PersistentVolume objects -type PVAssumeCache struct { - *assumecache.AssumeCache - logger klog.Logger -} - -func pvStorageClassIndexFunc(obj interface{}) ([]string, error) { - if pv, ok := obj.(*v1.PersistentVolume); ok { - return []string{storagehelpers.GetPersistentVolumeClass(pv)}, nil - } - return []string{""}, fmt.Errorf("object is not a v1.PersistentVolume: %v", obj) -} - -// NewPVAssumeCache creates a PV assume cache. 
-func NewPVAssumeCache(logger klog.Logger, informer assumecache.Informer) *PVAssumeCache {
- logger = klog.LoggerWithName(logger, "PV Cache")
- return &PVAssumeCache{
- AssumeCache: assumecache.NewAssumeCache(logger, informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc),
- logger: logger,
- }
-}
-
-func (c *PVAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) {
- obj, err := c.Get(pvName)
- if err != nil {
- return nil, err
- }
-
- pv, ok := obj.(*v1.PersistentVolume)
- if !ok {
- return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolume", Object: obj}
- }
- return pv, nil
-}
-
-func (c *PVAssumeCache) GetAPIPV(pvName string) (*v1.PersistentVolume, error) {
- obj, err := c.GetAPIObj(pvName)
- if err != nil {
- return nil, err
- }
- pv, ok := obj.(*v1.PersistentVolume)
- if !ok {
- return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolume", Object: obj}
- }
- return pv, nil
-}
-
-func (c *PVAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume {
- objs := c.List(&v1.PersistentVolume{
- Spec: v1.PersistentVolumeSpec{
- StorageClassName: storageClassName,
- },
- })
- pvs := []*v1.PersistentVolume{}
- for _, obj := range objs {
- pv, ok := obj.(*v1.PersistentVolume)
- if !ok {
- c.logger.Error(&assumecache.WrongTypeError{TypeName: "v1.PersistentVolume", Object: obj}, "ListPVs")
- continue
- }
- pvs = append(pvs, pv)
- }
- return pvs
-}
-
-// PVCAssumeCache is an AssumeCache for PersistentVolumeClaim objects.
-type PVCAssumeCache struct {
- *assumecache.AssumeCache
- logger klog.Logger
-}
-
-// NewPVCAssumeCache creates a PVC assume cache.
-func NewPVCAssumeCache(logger klog.Logger, informer assumecache.Informer) *PVCAssumeCache {
- logger = klog.LoggerWithName(logger, "PVC Cache")
- return &PVCAssumeCache{
- AssumeCache: assumecache.NewAssumeCache(logger, informer, "v1.PersistentVolumeClaim", "", nil),
- logger: logger,
- }
-}
-
-func (c *PVCAssumeCache) GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) {
- obj, err := c.Get(pvcKey)
- if err != nil {
- return nil, err
- }
-
- pvc, ok := obj.(*v1.PersistentVolumeClaim)
- if !ok {
- return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolumeClaim", Object: obj}
- }
- return pvc, nil
-}
-
-func (c *PVCAssumeCache) GetAPIPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) {
- obj, err := c.GetAPIObj(pvcKey)
- if err != nil {
- return nil, err
- }
- pvc, ok := obj.(*v1.PersistentVolumeClaim)
- if !ok {
- return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolumeClaim", Object: obj}
- }
- return pvc, nil
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go
deleted file mode 100644
index 783d658895a..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go
+++ /dev/null
@@ -1,1124 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volumebinding
-
-import (
- "context"
- "errors"
- "fmt"
- "sort"
- "strings"
- "time"
-
- v1 "k8s.io/api/core/v1"
- storagev1 "k8s.io/api/storage/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/apiserver/pkg/storage"
- coreinformers "k8s.io/client-go/informers/core/v1"
- storageinformers "k8s.io/client-go/informers/storage/v1"
- clientset "k8s.io/client-go/kubernetes"
- corelisters "k8s.io/client-go/listers/core/v1"
- storagelisters "k8s.io/client-go/listers/storage/v1"
- "k8s.io/component-helpers/storage/ephemeral"
- "k8s.io/component-helpers/storage/volume"
- csitrans "k8s.io/csi-translation-lib"
- csiplugins "k8s.io/csi-translation-lib/plugins"
- "k8s.io/klog/v2"
- v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
- "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
- "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
- "k8s.io/kubernetes/pkg/scheduler/util/assumecache"
-)
-
-// ConflictReason is used for the special strings which explain why
-// volume binding is impossible for a node.
-type ConflictReason string
-
-// ConflictReasons contains all reasons that explain why volume binding is impossible for a node.
-type ConflictReasons []ConflictReason
-
-func (reasons ConflictReasons) Len() int { return len(reasons) }
-func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
-func (reasons ConflictReasons) Swap(i, j int) { reasons[i], reasons[j] = reasons[j], reasons[i] }
-
-const (
- // ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
- ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
- // ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
- ErrReasonNodeConflict ConflictReason = "node(s) didn't match PersistentVolume's node affinity"
- // ErrReasonNotEnoughSpace is used when a pod cannot start on a node because not enough storage space is available.
- ErrReasonNotEnoughSpace = "node(s) did not have enough free storage"
- // ErrReasonPVNotExist is used when a pod has one or more PVC(s) bound to non-existent persistent volume(s).
- ErrReasonPVNotExist = "node(s) unavailable due to one or more pvc(s) bound to non-existent pv(s)"
-)
-
-// BindingInfo holds a binding between PV and PVC.
-type BindingInfo struct {
- // PVC that needs to be bound
- pvc *v1.PersistentVolumeClaim
-
- // Proposed PV to bind to this PVC
- pv *v1.PersistentVolume
-}
-
-// StorageClassName returns the name of the storage class.
-func (b *BindingInfo) StorageClassName() string {
- return b.pv.Spec.StorageClassName
-}
-
-// StorageResource represents storage resource.
-type StorageResource struct {
- Requested int64
- Capacity int64
-}
-
-// StorageResource returns storage resource.
-func (b *BindingInfo) StorageResource() *StorageResource {
- // both fields are mandatory
- requestedQty := b.pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
- capacityQty := b.pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
- return &StorageResource{
- Requested: requestedQty.Value(),
- Capacity: capacityQty.Value(),
- }
-}
-
-// DynamicProvision represents a dynamically provisioned volume.
-type DynamicProvision struct {
- PVC *v1.PersistentVolumeClaim
- NodeCapacity *storagev1.CSIStorageCapacity
-}
-
-// PodVolumes holds pod's volumes information used in volume scheduling.
-type PodVolumes struct {
- // StaticBindings are binding decisions for PVCs which can be bound to
- // pre-provisioned static PVs.
- StaticBindings []*BindingInfo
- // DynamicProvisions are PVCs that require dynamic provisioning.
- DynamicProvisions []*DynamicProvision
-}
-
-// InTreeToCSITranslator contains methods required to check migratable status
-// and perform translations from InTree PVs to CSI.
-type InTreeToCSITranslator interface {
- IsPVMigratable(pv *v1.PersistentVolume) bool
- GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error)
- TranslateInTreePVToCSI(logger klog.Logger, pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
-}
-
-// SchedulerVolumeBinder is used by the scheduler VolumeBinding plugin to
-// handle PVC/PV binding and dynamic provisioning. The binding decisions are
-// integrated into the pod scheduling workflow so that the PV NodeAffinity is
-// also considered along with the pod's other scheduling requirements.
-//
-// This integrates into the existing scheduler workflow as follows:
-// 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
-// a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked
-// here, pod volume information will be saved in current scheduling cycle state for later use.
-// b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here.
-// c. Invokes all score plugins. Future/TBD
-// d. Selects the best node for the Pod.
-// e. Invokes all reserve plugins. AssumePodVolumes() is invoked here.
-// i. If PVC binding is required, cache in-memory only:
-// * For manual binding: update PV objects for prebinding to the corresponding PVCs.
-// * For dynamic provisioning: update PVC object with the node selected in d).
-// * For the pod, which PVCs and PVs need API updates.
-// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache;
-// this is handled in the scheduler and not here.
-// f. Asynchronously bind volumes and pod in a separate goroutine.
-// i. BindPodVolumes() is called first in PreBind phase. It makes all the necessary API updates and waits for
-// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent
-// back through the scheduler.
-// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding.
-// 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue
-// while the actual binding operation occurs in the background.
-type SchedulerVolumeBinder interface {
- // GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning),
- // unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
- GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)
-
- // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
- // node and returns pod's volumes information.
- //
- // If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
- // Otherwise, it tries to find an available PV to bind to the PVC.
- // - // It returns an error when something went wrong or a list of reasons why the node is - // (currently) not usable for the pod. - // - // If the CSIStorageCapacity feature is enabled, then it also checks for sufficient storage - // for volumes that still need to be created. - // - // This function is called by the scheduler VolumeBinding plugin and can be called in parallel - FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) - - // AssumePodVolumes will: - // 1. Take the PV matches for unbound PVCs and update the PV cache assuming - // that the PV is prebound to the PVC. - // 2. Take the PVCs that need provisioning and update the PVC cache with related - // annotations set. - // - // It returns true if all volumes are fully bound - // - // This function is called serially. - AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) - - // RevertAssumedPodVolumes will revert assumed PV and PVC cache. - RevertAssumedPodVolumes(podVolumes *PodVolumes) - - // BindPodVolumes will: - // 1. Initiate the volume binding by making the API call to prebind the PV - // to its matching PVC. - // 2. Trigger the volume provisioning by making the API call to set related - // annotations on the PVC - // 3. Wait for PVCs to be completely bound by the PV controller - // - // This function can be called in parallel. - BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) error -} - -type PodVolumeClaims struct { - // boundClaims are the pod's bound PVCs. - boundClaims []*v1.PersistentVolumeClaim - // unboundClaimsDelayBinding are the pod's unbound with delayed binding (including provisioning) PVCs. - unboundClaimsDelayBinding []*v1.PersistentVolumeClaim - // unboundClaimsImmediate are the pod's unbound with immediate binding PVCs (i.e., supposed to be bound already) . - unboundClaimsImmediate []*v1.PersistentVolumeClaim - // unboundVolumesDelayBinding are PVs that belong to storage classes of the pod's unbound PVCs with delayed binding. - unboundVolumesDelayBinding map[string][]*v1.PersistentVolume -} - -type volumeBinder struct { - kubeClient clientset.Interface - enableVolumeAttributesClass bool - enableCSIMigrationPortworx bool - - classLister storagelisters.StorageClassLister - podLister corelisters.PodLister - nodeLister corelisters.NodeLister - csiNodeLister storagelisters.CSINodeLister - - pvcCache *PVCAssumeCache - pvCache *PVAssumeCache - - // Amount of time to wait for the bind operation to succeed - bindTimeout time.Duration - - translator InTreeToCSITranslator - - csiDriverLister storagelisters.CSIDriverLister - csiStorageCapacityLister storagelisters.CSIStorageCapacityLister -} - -var _ SchedulerVolumeBinder = &volumeBinder{} - -// CapacityCheck contains additional parameters for NewVolumeBinder that -// are only needed when checking volume sizes against available storage -// capacity is desired. -type CapacityCheck struct { - CSIDriverInformer storageinformers.CSIDriverInformer - CSIStorageCapacityInformer storageinformers.CSIStorageCapacityInformer -} - -// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions. -// -// capacityCheck determines how storage capacity is checked (CSIStorageCapacity feature). 
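-//
-// Rough construction sketch (the informer-factory wiring below is
-// illustrative, not part of this file):
-//
-//	binder := NewVolumeBinder(logger, client, fts,
-//		factory.Core().V1().Pods(),
-//		factory.Core().V1().Nodes(),
-//		factory.Storage().V1().CSINodes(),
-//		factory.Core().V1().PersistentVolumeClaims(),
-//		factory.Core().V1().PersistentVolumes(),
-//		factory.Storage().V1().StorageClasses(),
-//		CapacityCheck{
-//			CSIDriverInformer:          factory.Storage().V1().CSIDrivers(),
-//			CSIStorageCapacityInformer: factory.Storage().V1().CSIStorageCapacities(),
-//		},
-//		10*time.Minute)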
-func NewVolumeBinder( - logger klog.Logger, - kubeClient clientset.Interface, - fts feature.Features, - podInformer coreinformers.PodInformer, - nodeInformer coreinformers.NodeInformer, - csiNodeInformer storageinformers.CSINodeInformer, - pvcInformer coreinformers.PersistentVolumeClaimInformer, - pvInformer coreinformers.PersistentVolumeInformer, - storageClassInformer storageinformers.StorageClassInformer, - capacityCheck CapacityCheck, - bindTimeout time.Duration) SchedulerVolumeBinder { - b := &volumeBinder{ - kubeClient: kubeClient, - enableVolumeAttributesClass: fts.EnableVolumeAttributesClass, - enableCSIMigrationPortworx: fts.EnableCSIMigrationPortworx, - podLister: podInformer.Lister(), - classLister: storageClassInformer.Lister(), - nodeLister: nodeInformer.Lister(), - csiNodeLister: csiNodeInformer.Lister(), - pvcCache: NewPVCAssumeCache(logger, pvcInformer.Informer()), - pvCache: NewPVAssumeCache(logger, pvInformer.Informer()), - bindTimeout: bindTimeout, - translator: csitrans.New(), - } - - b.csiDriverLister = capacityCheck.CSIDriverInformer.Lister() - b.csiStorageCapacityLister = capacityCheck.CSIStorageCapacityInformer.Lister() - - return b -} - -// FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs -// for the given pod and node. If the node does not fit, conflict reasons are -// returned. -func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { - podVolumes = &PodVolumes{} - - // Warning: Below log needs high verbosity as it can be printed several times (#60933). - logger.V(5).Info("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node)) - - // Initialize to true for pods that don't have volumes. These - // booleans get translated into reason strings when the function - // returns without an error. - unboundVolumesSatisfied := true - boundVolumesSatisfied := true - sufficientStorage := true - boundPVsFound := true - defer func() { - if err != nil { - return - } - if !boundVolumesSatisfied { - reasons = append(reasons, ErrReasonNodeConflict) - } - if !unboundVolumesSatisfied { - reasons = append(reasons, ErrReasonBindConflict) - } - if !sufficientStorage { - reasons = append(reasons, ErrReasonNotEnoughSpace) - } - if !boundPVsFound { - reasons = append(reasons, ErrReasonPVNotExist) - } - }() - - defer func() { - if err != nil { - metrics.VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc() - } - }() - - var ( - staticBindings []*BindingInfo - dynamicProvisions []*DynamicProvision - ) - defer func() { - // Although we do not distinguish nil from empty in this function, for - // easier testing, we normalize empty to nil. 
- if len(staticBindings) == 0 {
- staticBindings = nil
- }
- if len(dynamicProvisions) == 0 {
- dynamicProvisions = nil
- }
- podVolumes.StaticBindings = staticBindings
- podVolumes.DynamicProvisions = dynamicProvisions
- }()
-
- // Check PV node affinity on bound volumes
- if len(podVolumeClaims.boundClaims) > 0 {
- boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(logger, podVolumeClaims.boundClaims, node, pod)
- if err != nil {
- return
- }
- }
-
- // Find matching volumes and node for unbound claims
- if len(podVolumeClaims.unboundClaimsDelayBinding) > 0 {
- var (
- claimsToFindMatching []*v1.PersistentVolumeClaim
- claimsToProvision []*v1.PersistentVolumeClaim
- )
-
- // Filter out claims to provision
- for _, claim := range podVolumeClaims.unboundClaimsDelayBinding {
- if selectedNode, ok := claim.Annotations[volume.AnnSelectedNode]; ok {
- if selectedNode != node.Name {
- // Fast path, skip unmatched node.
- unboundVolumesSatisfied = false
- return
- }
- claimsToProvision = append(claimsToProvision, claim)
- } else {
- claimsToFindMatching = append(claimsToFindMatching, claim)
- }
- }
-
- // Find matching volumes
- if len(claimsToFindMatching) > 0 {
- var unboundClaims []*v1.PersistentVolumeClaim
- unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(logger, pod, claimsToFindMatching, podVolumeClaims.unboundVolumesDelayBinding, node)
- if err != nil {
- return
- }
- claimsToProvision = append(claimsToProvision, unboundClaims...)
- }
-
- // Check for claims to provision. This is the first time where we potentially
- // find out that storage is not sufficient for the node.
- if len(claimsToProvision) > 0 {
- unboundVolumesSatisfied, sufficientStorage, dynamicProvisions, err = b.checkVolumeProvisions(logger, pod, claimsToProvision, node)
- if err != nil {
- return
- }
- }
- }
-
- return
-}
-
-// convertDynamicProvisionsToPVCs converts a slice of *DynamicProvision to a
-// slice of PersistentVolumeClaim.
-func convertDynamicProvisionsToPVCs(dynamicProvisions []*DynamicProvision) []*v1.PersistentVolumeClaim {
- pvcs := make([]*v1.PersistentVolumeClaim, 0, len(dynamicProvisions))
- for _, dynamicProvision := range dynamicProvisions {
- pvcs = append(pvcs, dynamicProvision.PVC)
- }
- return pvcs
-}
-
-// AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
-// volume information for the chosen node, and:
-// 1. Update the pvCache with the new prebound PV.
-// 2. Update the pvcCache with the new PVCs with annotations set.
-// 3. Update PodVolumes again with cached API updates for PVs and PVCs.
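-//
-// Reserve-phase sketch (caller-side names are illustrative):
-//
-//	allBound, err := binder.AssumePodVolumes(logger, pod, node.Name, podVolumes)
-//	if err != nil {
-//		binder.RevertAssumedPodVolumes(podVolumes)
-//	}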
-func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) { - logger.V(4).Info("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) - defer func() { - if err != nil { - metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() - } - }() - - if allBound := b.arePodVolumesBound(logger, assumedPod); allBound { - logger.V(4).Info("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) - return true, nil - } - - // Assume PV - newBindings := []*BindingInfo{} - for _, binding := range podVolumes.StaticBindings { - newPV, dirty, err := volume.GetBindVolumeToClaim(binding.pv, binding.pvc) - logger.V(5).Info("AssumePodVolumes: GetBindVolumeToClaim", - "pod", klog.KObj(assumedPod), - "PV", klog.KObj(binding.pv), - "PVC", klog.KObj(binding.pvc), - "newPV", klog.KObj(newPV), - "dirty", dirty, - ) - if err != nil { - logger.Error(err, "AssumePodVolumes: fail to GetBindVolumeToClaim") - b.revertAssumedPVs(newBindings) - return false, err - } - // TODO: can we assume every time? - if dirty { - err = b.pvCache.Assume(newPV) - if err != nil { - b.revertAssumedPVs(newBindings) - return false, err - } - } - newBindings = append(newBindings, &BindingInfo{pv: newPV, pvc: binding.pvc}) - } - - // Assume PVCs - newProvisionedPVCs := []*DynamicProvision{} - for _, dynamicProvision := range podVolumes.DynamicProvisions { - // The claims from method args can be pointing to watcher cache. We must not - // modify these, therefore create a copy. - claimClone := dynamicProvision.PVC.DeepCopy() - metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, volume.AnnSelectedNode, nodeName) - err = b.pvcCache.Assume(claimClone) - if err != nil { - pvcs := convertDynamicProvisionsToPVCs(newProvisionedPVCs) - b.revertAssumedPVs(newBindings) - b.revertAssumedPVCs(pvcs) - return - } - - newProvisionedPVCs = append(newProvisionedPVCs, &DynamicProvision{PVC: claimClone}) - } - - podVolumes.StaticBindings = newBindings - podVolumes.DynamicProvisions = newProvisionedPVCs - return -} - -// RevertAssumedPodVolumes will revert assumed PV and PVC cache. -func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) { - pvcs := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions) - b.revertAssumedPVs(podVolumes.StaticBindings) - b.revertAssumedPVCs(pvcs) -} - -// BindPodVolumes gets the cached bindings and PVCs to provision in pod's volumes information, -// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound -// by the PV controller. 
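-//
-// PreBind-phase sketch (error handling is simplified and illustrative):
-//
-//	if err := binder.BindPodVolumes(ctx, assumedPod, podVolumes); err != nil {
-//		// binding failed; the pod is sent back through scheduling
-//	}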
-func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) (err error) { - logger := klog.FromContext(ctx) - logger.V(4).Info("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName)) - - defer func() { - if err != nil { - metrics.VolumeSchedulingStageFailed.WithLabelValues("bind").Inc() - } - }() - - bindings := podVolumes.StaticBindings - claimsToProvision := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions) - - // Start API operations - err = b.bindAPIUpdate(ctx, assumedPod, bindings, claimsToProvision) - if err != nil { - return err - } - - err = wait.PollUntilContextTimeout(ctx, time.Second, b.bindTimeout, false, func(ctx context.Context) (bool, error) { - b, err := b.checkBindings(logger, assumedPod, bindings, claimsToProvision) - return b, err - }) - if err != nil { - return fmt.Errorf("binding volumes: %w", err) - } - return nil -} - -func getPodName(pod *v1.Pod) string { - return pod.Namespace + "/" + pod.Name -} - -func getPVCName(pvc *v1.PersistentVolumeClaim) string { - return pvc.Namespace + "/" + pvc.Name -} - -// bindAPIUpdate makes the API update for those PVs/PVCs. -func (b *volumeBinder) bindAPIUpdate(ctx context.Context, pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error { - logger := klog.FromContext(ctx) - podName := getPodName(pod) - if bindings == nil { - return fmt.Errorf("failed to get cached bindings for pod %q", podName) - } - if claimsToProvision == nil { - return fmt.Errorf("failed to get cached claims to provision for pod %q", podName) - } - - lastProcessedBinding := 0 - lastProcessedProvisioning := 0 - defer func() { - // only revert assumed cached updates for volumes we haven't successfully bound - if lastProcessedBinding < len(bindings) { - b.revertAssumedPVs(bindings[lastProcessedBinding:]) - } - // only revert assumed cached updates for claims we haven't updated, - if lastProcessedProvisioning < len(claimsToProvision) { - b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:]) - } - }() - - var ( - binding *BindingInfo - i int - claim *v1.PersistentVolumeClaim - ) - - // Do the actual prebinding. Let the PV controller take care of the rest - // There is no API rollback if the actual binding fails - for _, binding = range bindings { - // TODO: does it hurt if we make an api call and nothing needs to be updated? - logger.V(5).Info("Updating PersistentVolume: binding to claim", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) - newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(ctx, binding.pv, metav1.UpdateOptions{}) - if err != nil { - logger.V(4).Info("Updating PersistentVolume: binding to claim failed", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err) - return err - } - - logger.V(2).Info("Updated PersistentVolume with claim. Waiting for binding to complete", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) - // Save updated object from apiserver for later checking. - binding.pv = newPV - lastProcessedBinding++ - } - - // Update claims objects to trigger volume provisioning. 
Let the PV controller take care of the rest
- // PV controller is expected to signal back by removing related annotations if actual provisioning fails
- for i, claim = range claimsToProvision {
- logger.V(5).Info("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim))
- newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
- if err != nil {
- logger.V(4).Info("Updating PersistentVolumeClaim: binding to volume failed", "PVC", klog.KObj(claim), "err", err)
- return err
- }
-
- // Save updated object from apiserver for later checking.
- claimsToProvision[i] = newClaim
- lastProcessedProvisioning++
- }
-
- return nil
-}
-
-var (
- versioner = storage.APIObjectVersioner{}
-)
-
-// checkBindings runs through all the PVCs in the Pod and checks:
-// * if the PVC is fully bound
-// * if there are any conditions that require binding to fail and be retried
-//
-// It returns true when all of the Pod's PVCs are fully bound, and an error if
-// binding (and scheduling) needs to be retried.
-// Note that it checks API objects, not the PV/PVC cache; the PV/PVC cache can
-// be assumed again in the main scheduler loop, so we must check the latest
-// state in the API server, which is shared with the PV controller and
-// provisioners.
-func (b *volumeBinder) checkBindings(logger klog.Logger, pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
- podName := getPodName(pod)
- if bindings == nil {
- return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
- }
- if claimsToProvision == nil {
- return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
- }
-
- node, err := b.nodeLister.Get(pod.Spec.NodeName)
- if err != nil {
- return false, fmt.Errorf("failed to get node %q: %w", pod.Spec.NodeName, err)
- }
-
- csiNode, err := b.csiNodeLister.Get(node.Name)
- if err != nil {
- // TODO: return the error once CSINode is created by default
- logger.V(4).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
- }
-
- // Check for any conditions that might require scheduling retry
-
- // When pod is deleted, binding operation should be cancelled. There is no
- // need to check PV/PVC bindings any more.
- _, err = b.podLister.Pods(pod.Namespace).Get(pod.Name)
- if err != nil {
- if apierrors.IsNotFound(err) {
- return false, fmt.Errorf("pod does not exist any more: %w", err)
- }
- logger.Error(err, "Failed to get pod from the lister", "pod", klog.KObj(pod))
- }
-
- for _, binding := range bindings {
- pv, err := b.pvCache.GetAPIPV(binding.pv.Name)
- if err != nil {
- return false, fmt.Errorf("failed to check binding: %w", err)
- }
-
- pvc, err := b.pvcCache.GetAPIPVC(getPVCName(binding.pvc))
- if err != nil {
- return false, fmt.Errorf("failed to check binding: %w", err)
- }
-
- // Because we updated PV in apiserver, skip if API object is older
- // and wait for new API object propagated from apiserver.
- if versioner.CompareResourceVersion(binding.pv, pv) > 0 { - return false, nil - } - - pv, err = b.tryTranslatePVToCSI(logger, pv, csiNode) - if err != nil { - return false, fmt.Errorf("failed to translate pv to csi: %w", err) - } - - // Check PV's node affinity (the node might not have the proper label) - if err := volume.CheckNodeAffinity(pv, node.Labels); err != nil { - return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err) - } - - // Check if pv.ClaimRef got dropped by unbindVolume() - if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" { - return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name) - } - - // Check if pvc is fully bound - if !b.isPVCFullyBound(pvc) { - return false, nil - } - } - - for _, claim := range claimsToProvision { - pvc, err := b.pvcCache.GetAPIPVC(getPVCName(claim)) - if err != nil { - return false, fmt.Errorf("failed to check provisioning pvc: %w", err) - } - - // Because we updated PVC in apiserver, skip if API object is older - // and wait for new API object propagated from apiserver. - if versioner.CompareResourceVersion(claim, pvc) > 0 { - return false, nil - } - - // Check if selectedNode annotation is still set - if pvc.Annotations == nil { - return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name) - } - selectedNode := pvc.Annotations[volume.AnnSelectedNode] - if selectedNode != pod.Spec.NodeName { - // If provisioner fails to provision a volume, selectedNode - // annotation will be removed to signal back to the scheduler to - // retry. - return false, fmt.Errorf("provisioning failed for PVC %q", pvc.Name) - } - - // If the PVC is bound to a PV, check its node affinity - if pvc.Spec.VolumeName != "" { - pv, err := b.pvCache.GetAPIPV(pvc.Spec.VolumeName) - if err != nil { - if errors.Is(err, assumecache.ErrNotFound) { - // We tolerate NotFound error here, because PV is possibly - // not found because of API delay, we can check next time. - // And if PV does not exist because it's deleted, PVC will - // be unbound eventually. - return false, nil - } - return false, fmt.Errorf("failed to get pv %q from cache: %w", pvc.Spec.VolumeName, err) - } - - pv, err = b.tryTranslatePVToCSI(logger, pv, csiNode) - if err != nil { - return false, err - } - - if err := volume.CheckNodeAffinity(pv, node.Labels); err != nil { - return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err) - } - } - - // Check if pvc is fully bound - if !b.isPVCFullyBound(pvc) { - return false, nil - } - } - - // All pvs and pvcs that we operated on are bound - logger.V(2).Info("All PVCs for pod are bound", "pod", klog.KObj(pod)) - return true, nil -} - -func (b *volumeBinder) isVolumeBound(logger klog.Logger, pod *v1.Pod, vol *v1.Volume) (bound bool, pvc *v1.PersistentVolumeClaim, err error) { - pvcName := "" - isEphemeral := false - switch { - case vol.PersistentVolumeClaim != nil: - pvcName = vol.PersistentVolumeClaim.ClaimName - case vol.Ephemeral != nil: - // Generic ephemeral inline volumes also use a PVC, - // just with a computed name, and... - pvcName = ephemeral.VolumeClaimName(pod, vol) - isEphemeral = true - default: - return true, nil, nil - } - - bound, pvc, err = b.isPVCBound(logger, pod.Namespace, pvcName) - // ... the PVC must be owned by the pod. 
- if isEphemeral && err == nil && pvc != nil { - if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil { - return false, nil, err - } - } - return -} - -func (b *volumeBinder) isPVCBound(logger klog.Logger, namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) { - claim := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: pvcName, - Namespace: namespace, - }, - } - pvcKey := getPVCName(claim) - pvc, err := b.pvcCache.GetPVC(pvcKey) - if err != nil || pvc == nil { - return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err) - } - - fullyBound := b.isPVCFullyBound(pvc) - if fullyBound { - logger.V(5).Info("PVC is fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) - } else { - if pvc.Spec.VolumeName != "" { - logger.V(5).Info("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) - } else { - logger.V(5).Info("PVC is not bound", "PVC", klog.KObj(pvc)) - } - } - return fullyBound, pvc, nil -} - -func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool { - return pvc.Spec.VolumeName != "" && metav1.HasAnnotation(pvc.ObjectMeta, volume.AnnBindCompleted) -} - -// arePodVolumesBound returns true if all volumes are fully bound -func (b *volumeBinder) arePodVolumesBound(logger klog.Logger, pod *v1.Pod) bool { - for _, vol := range pod.Spec.Volumes { - if isBound, _, _ := b.isVolumeBound(logger, pod, &vol); !isBound { - // Pod has at least one PVC that needs binding - return false - } - } - return true -} - -// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning), -// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding. 
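-//
-// PreFilter-phase sketch (illustrative):
-//
-//	claims, err := binder.GetPodVolumeClaims(logger, pod)
-//	if err == nil && len(claims.unboundClaimsImmediate) > 0 {
-//		// immediate-binding PVCs should already be bound; fail the pod early
-//	}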
-func (b *volumeBinder) GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
- podVolumeClaims = &PodVolumeClaims{
- boundClaims: []*v1.PersistentVolumeClaim{},
- unboundClaimsImmediate: []*v1.PersistentVolumeClaim{},
- unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{},
- }
-
- for _, vol := range pod.Spec.Volumes {
- volumeBound, pvc, err := b.isVolumeBound(logger, pod, &vol)
- if err != nil {
- return podVolumeClaims, err
- }
- if pvc == nil {
- continue
- }
- if volumeBound {
- podVolumeClaims.boundClaims = append(podVolumeClaims.boundClaims, pvc)
- } else {
- delayBindingMode, err := volume.IsDelayBindingMode(pvc, b.classLister)
- if err != nil {
- return podVolumeClaims, err
- }
- // Prebound PVCs are treated as unbound immediate binding
- if delayBindingMode && pvc.Spec.VolumeName == "" {
- // Scheduler path
- podVolumeClaims.unboundClaimsDelayBinding = append(podVolumeClaims.unboundClaimsDelayBinding, pvc)
- } else {
- // !delayBindingMode || pvc.Spec.VolumeName != ""
- // Immediate binding should have already been bound
- podVolumeClaims.unboundClaimsImmediate = append(podVolumeClaims.unboundClaimsImmediate, pvc)
- }
- }
- }
-
- podVolumeClaims.unboundVolumesDelayBinding = map[string][]*v1.PersistentVolume{}
- for _, pvc := range podVolumeClaims.unboundClaimsDelayBinding {
- // Get storage class name from each PVC
- storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
- podVolumeClaims.unboundVolumesDelayBinding[storageClassName] = b.pvCache.ListPVs(storageClassName)
- }
- return podVolumeClaims, nil
-}
-
-func (b *volumeBinder) checkBoundClaims(logger klog.Logger, claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) {
- csiNode, err := b.csiNodeLister.Get(node.Name)
- if err != nil {
- // TODO: return the error once CSINode is created by default
- logger.V(4).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
- }
-
- for _, pvc := range claims {
- pvName := pvc.Spec.VolumeName
- pv, err := b.pvCache.GetPV(pvName)
- if err != nil {
- if errors.Is(err, assumecache.ErrNotFound) {
- err = nil
- }
- return true, false, err
- }
-
- pv, err = b.tryTranslatePVToCSI(logger, pv, csiNode)
- if err != nil {
- return false, true, err
- }
-
- err = volume.CheckNodeAffinity(pv, node.Labels)
- if err != nil {
- logger.V(4).Info("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err)
- return false, true, nil
- }
- logger.V(5).Info("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod))
- }
-
- logger.V(4).Info("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node))
- return true, true, nil
-}
-
-// findMatchingVolumes tries to find matching volumes for given claims,
-// and returns unbound claims for further provisioning.
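-//
-// Claims are matched smallest-request-first, and every chosen PV is recorded
-// in chosenPVs, so two claims from the same pod can never bind to one PV.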
-func (b *volumeBinder) findMatchingVolumes(logger klog.Logger, pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
- // Sort all the claims by increasing size request to get the smallest fits
- sort.Sort(byPVCSize(claimsToBind))
-
- chosenPVs := map[string]*v1.PersistentVolume{}
-
- foundMatches = true
-
- for _, pvc := range claimsToBind {
- // Get storage class name from each PVC
- storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
- pvs := unboundVolumesDelayBinding[storageClassName]
-
- // Find a matching PV
- pv, err := volume.FindMatchingVolume(pvc, pvs, node, chosenPVs, true, b.enableVolumeAttributesClass)
- if err != nil {
- return false, nil, nil, err
- }
- if pv == nil {
- logger.V(4).Info("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node))
- unboundClaims = append(unboundClaims, pvc)
- foundMatches = false
- continue
- }
-
- // matching PV needs to be excluded so we don't select it again
- chosenPVs[pv.Name] = pv
- bindings = append(bindings, &BindingInfo{pv: pv, pvc: pvc})
- logger.V(5).Info("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod))
- }
-
- if foundMatches {
- logger.V(4).Info("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node))
- }
-
- return
-}
-
-// checkVolumeProvisions checks given unbound claims (the claims have gone through func
-// findMatchingVolumes, and do not have matching volumes for binding), and returns true
-// if all of the claims are eligible for dynamic provisioning.
-func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*DynamicProvision, err error) {
- dynamicProvisions = []*DynamicProvision{}
-
- // We return early with provisionedClaims == nil if a check
- // fails or we encounter an error.
- for _, claim := range claimsToProvision {
- pvcName := getPVCName(claim)
- className := volume.GetPersistentVolumeClaimClass(claim)
- if className == "" {
- return false, false, nil, fmt.Errorf("no class for claim %q", pvcName)
- }
-
- class, err := b.classLister.Get(className)
- if err != nil {
- return false, false, nil, fmt.Errorf("failed to find storage class %q", className)
- }
- provisioner := class.Provisioner
- if provisioner == "" || provisioner == volume.NotSupportedProvisioner {
- logger.V(4).Info("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim))
- return false, true, nil, nil
- }
-
- // Check if the node can satisfy the topology requirement in the class
- if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
- logger.V(4).Info("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim))
- return false, true, nil, nil
- }
-
- // Check storage capacity.
- sufficient, capacity, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node)
- if err != nil {
- return false, false, nil, err
- }
- if !sufficient {
- // hasEnoughCapacity logs an explanation.
- return true, false, nil, nil
- }
-
- dynamicProvisions = append(dynamicProvisions, &DynamicProvision{
- PVC: claim,
- NodeCapacity: capacity,
- })
- }
- logger.V(4).Info("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))
-
- return true, true, dynamicProvisions, nil
-}
-
-func (b *volumeBinder) revertAssumedPVs(bindings []*BindingInfo) {
- for _, binding := range bindings {
- b.pvCache.Restore(binding.pv.Name)
- }
-}
-
-func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
- for _, claim := range claims {
- b.pvcCache.Restore(getPVCName(claim))
- }
-}
-
-// hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size
-// that is available from the node. This function returns the node capacity based on the PVC's storage class.
-func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, *storagev1.CSIStorageCapacity, error) {
- quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage]
- if !ok {
- // No capacity to check for.
- return true, nil, nil
- }
-
- // Only enabled for CSI drivers which opt into it.
- driver, err := b.csiDriverLister.Get(provisioner)
- if err != nil {
- if apierrors.IsNotFound(err) {
- // Either the provisioner is not a CSI driver or the driver does not
- // opt into storage capacity scheduling. Either way, skip
- // capacity checking.
- return true, nil, nil
- }
- return false, nil, err
- }
- if driver.Spec.StorageCapacity == nil || !*driver.Spec.StorageCapacity {
- return true, nil, nil
- }
-
- // Look for matching CSIStorageCapacity object(s).
- // TODO (for beta): benchmark this and potentially introduce some kind of lookup structure (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-654356718).
- capacities, err := b.csiStorageCapacityLister.List(labels.Everything())
- if err != nil {
- return false, nil, err
- }
-
- sizeInBytes := quantity.Value()
- for _, capacity := range capacities {
- if capacity.StorageClassName == storageClass.Name &&
- capacitySufficient(capacity, sizeInBytes) &&
- b.nodeHasAccess(logger, node, capacity) {
- // Enough capacity found.
- return true, capacity, nil
- }
- }
-
- // TODO (?): this doesn't give any information about which pools were considered and why
- // they had to be rejected. Log that above? But that might be a lot of log output...
- logger.V(4).Info("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
- "node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
- return false, nil, nil
-}
-
-func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {
- limit := volumeLimit(capacity)
- return limit != nil && limit.Value() >= sizeInBytes
-}
-
-func volumeLimit(capacity *storagev1.CSIStorageCapacity) *resource.Quantity {
- if capacity.MaximumVolumeSize != nil {
- // Prefer MaximumVolumeSize if available, it is more precise.
- return capacity.MaximumVolumeSize
- }
- return capacity.Capacity
-}
-
-func (b *volumeBinder) nodeHasAccess(logger klog.Logger, node *v1.Node, capacity *storagev1.CSIStorageCapacity) bool {
- if capacity.NodeTopology == nil {
- // Unavailable
- return false
- }
- // Only matching by label is supported.
- selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology) - if err != nil { - logger.Error(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology) - return false - } - return selector.Matches(labels.Set(node.Labels)) -} - -type byPVCSize []*v1.PersistentVolumeClaim - -func (a byPVCSize) Len() int { - return len(a) -} - -func (a byPVCSize) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a byPVCSize) Less(i, j int) bool { - iSize := a[i].Spec.Resources.Requests[v1.ResourceStorage] - jSize := a[j].Spec.Resources.Requests[v1.ResourceStorage] - // return true if iSize is less than jSize - return iSize.Cmp(jSize) == -1 -} - -// isCSIMigrationOnForPlugin checks if CSI migration is enabled for a given plugin. -func isCSIMigrationOnForPlugin(pluginName string, enableCSIMigrationPortworx bool) bool { - switch pluginName { - case csiplugins.AWSEBSInTreePluginName: - return true - case csiplugins.GCEPDInTreePluginName: - return true - case csiplugins.AzureDiskInTreePluginName: - return true - case csiplugins.CinderInTreePluginName: - return true - case csiplugins.PortworxVolumePluginName: - return enableCSIMigrationPortworx - } - return false -} - -// isPluginMigratedToCSIOnNode checks if an in-tree plugin has been migrated to a CSI driver on the node. -func isPluginMigratedToCSIOnNode(pluginName string, csiNode *storagev1.CSINode) bool { - if csiNode == nil { - return false - } - - csiNodeAnn := csiNode.GetAnnotations() - if csiNodeAnn == nil { - return false - } - - var mpaSet sets.Set[string] - mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey] - if len(mpa) == 0 { - mpaSet = sets.New[string]() - } else { - tok := strings.Split(mpa, ",") - mpaSet = sets.New(tok...) - } - - return mpaSet.Has(pluginName) -} - -// tryTranslatePVToCSI will translate the in-tree PV to CSI if it meets the criteria. If not, it returns the unmodified in-tree PV. -func (b *volumeBinder) tryTranslatePVToCSI(logger klog.Logger, pv *v1.PersistentVolume, csiNode *storagev1.CSINode) (*v1.PersistentVolume, error) { - if !b.translator.IsPVMigratable(pv) { - return pv, nil - } - - pluginName, err := b.translator.GetInTreePluginNameFromSpec(pv, nil) - if err != nil { - return nil, fmt.Errorf("could not get plugin name from pv: %v", err) - } - - if !isCSIMigrationOnForPlugin(pluginName, b.enableCSIMigrationPortworx) { - return pv, nil - } - - if !isPluginMigratedToCSIOnNode(pluginName, csiNode) { - return pv, nil - } - - transPV, err := b.translator.TranslateInTreePVToCSI(logger, pv) - if err != nil { - return nil, fmt.Errorf("could not translate pv: %v", err) - } - - return transPV, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go deleted file mode 100644 index f563c3c7563..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volumebinding
-
-import (
- "context"
-
- v1 "k8s.io/api/core/v1"
- "k8s.io/klog/v2"
-)
-
-// FakeVolumeBinderConfig holds configurations for fake volume binder.
-type FakeVolumeBinderConfig struct {
- AllBound bool
- FindReasons ConflictReasons
- FindErr error
- AssumeErr error
- BindErr error
-}
-
-// NewFakeVolumeBinder sets up all the caches needed for the scheduler to make
-// topology-aware volume binding decisions.
-func NewFakeVolumeBinder(config *FakeVolumeBinderConfig) *FakeVolumeBinder {
- return &FakeVolumeBinder{
- config: config,
- }
-}
-
-// FakeVolumeBinder represents a fake volume binder for testing.
-type FakeVolumeBinder struct {
- config *FakeVolumeBinderConfig
- AssumeCalled bool
- BindCalled bool
-}
-
-var _ SchedulerVolumeBinder = &FakeVolumeBinder{}
-
-// GetPodVolumeClaims implements SchedulerVolumeBinder.GetPodVolumeClaims.
-func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
- return &PodVolumeClaims{}, nil
-}
-
-// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
-func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
- return nil, b.config.FindReasons, b.config.FindErr
-}
-
-// AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes.
-func (b *FakeVolumeBinder) AssumePodVolumes(_ klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (bool, error) {
- b.AssumeCalled = true
- return b.config.AllBound, b.config.AssumeErr
-}
-
-// RevertAssumedPodVolumes implements SchedulerVolumeBinder.RevertAssumedPodVolumes.
-func (b *FakeVolumeBinder) RevertAssumedPodVolumes(_ *PodVolumes) {}
-
-// BindPodVolumes implements SchedulerVolumeBinder.BindPodVolumes.
-func (b *FakeVolumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) error {
- b.BindCalled = true
- return b.config.BindErr
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics/metrics.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics/metrics.go
deleted file mode 100644
index a52b1963292..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics/metrics.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package metrics
-
-import (
- "k8s.io/component-base/metrics"
- "k8s.io/component-base/metrics/legacyregistry"
-)
-
-// VolumeSchedulerSubsystem - subsystem name used by scheduler
-const VolumeSchedulerSubsystem = "scheduler_volume"
-
-var (
- // VolumeBindingRequestSchedulerBinderCache tracks the number of volume binder cache operations.
- VolumeBindingRequestSchedulerBinderCache = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: VolumeSchedulerSubsystem, - Name: "binder_cache_requests_total", - Help: "Total number for request volume binding cache", - StabilityLevel: metrics.ALPHA, - }, - []string{"operation"}, - ) - // VolumeSchedulingStageFailed tracks the number of failed volume scheduling operations. - VolumeSchedulingStageFailed = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: VolumeSchedulerSubsystem, - Name: "scheduling_stage_error_total", - Help: "Volume scheduling stage error count", - StabilityLevel: metrics.ALPHA, - }, - []string{"operation"}, - ) -) - -// RegisterVolumeSchedulingMetrics is used for scheduler, because the volume binding cache is a library -// used by scheduler process. -func RegisterVolumeSchedulingMetrics() { - legacyregistry.MustRegister(VolumeBindingRequestSchedulerBinderCache) - legacyregistry.MustRegister(VolumeSchedulingStageFailed) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/scorer.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/scorer.go deleted file mode 100644 index 4c079f28dae..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/scorer.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package volumebinding - -import ( - "math" - - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" -) - -// classResourceMap holds a map of storage class to resource. -type classResourceMap map[string]*StorageResource - -// volumeCapacityScorer calculates the score based on class storage resource information. -type volumeCapacityScorer func(classResourceMap) int64 - -// buildScorerFunction builds volumeCapacityScorer from the scoring function shape. 
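-//
-// For example, with a shape mapping utilization 0 -> score 0 and 100 -> 10
-// (the plugin's default shape; the numbers here are illustrative), a class
-// requesting 5Gi of a 10Gi capacity yields rawScoringFunction(50) = 5, while
-// zero capacity or an over-sized request yields rawScoringFunction(100) = 10.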
-func buildScorerFunction(scoringFunctionShape helper.FunctionShape) volumeCapacityScorer { - rawScoringFunction := helper.BuildBrokenLinearFunction(scoringFunctionShape) - f := func(requested, capacity int64) int64 { - if capacity == 0 || requested > capacity { - return rawScoringFunction(maxUtilization) - } - - return rawScoringFunction(requested * maxUtilization / capacity) - } - return func(classResources classResourceMap) int64 { - var nodeScore int64 - // in alpha stage, all classes have the same weight - weightSum := len(classResources) - if weightSum == 0 { - return 0 - } - for _, resource := range classResources { - classScore := f(resource.Requested, resource.Capacity) - nodeScore += classScore - } - return int64(math.Round(float64(nodeScore) / float64(weightSum))) - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/test_utils.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/test_utils.go deleted file mode 100644 index 23675f969d6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/test_utils.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package volumebinding - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/component-helpers/storage/volume" - "k8s.io/utils/ptr" -) - -type nodeBuilder struct { - *v1.Node -} - -func makeNode(name string) nodeBuilder { - return nodeBuilder{Node: &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - v1.LabelHostname: name, - }, - }, - }} -} - -func (nb nodeBuilder) withLabel(key, value string) nodeBuilder { - if nb.Node.ObjectMeta.Labels == nil { - nb.Node.ObjectMeta.Labels = map[string]string{} - } - nb.Node.ObjectMeta.Labels[key] = value - return nb -} - -type pvBuilder struct { - *v1.PersistentVolume -} - -func makePV(name, className string) pvBuilder { - return pvBuilder{PersistentVolume: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: v1.PersistentVolumeSpec{ - StorageClassName: className, - }, - }} -} - -func (pvb pvBuilder) withNodeAffinity(keyValues map[string][]string) pvBuilder { - matchExpressions := make([]v1.NodeSelectorRequirement, 0) - for key, values := range keyValues { - matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{ - Key: key, - Operator: v1.NodeSelectorOpIn, - Values: values, - }) - } - pvb.PersistentVolume.Spec.NodeAffinity = &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: matchExpressions, - }, - }, - }, - } - return pvb -} - -func (pvb pvBuilder) withVersion(version string) pvBuilder { - pvb.PersistentVolume.ObjectMeta.ResourceVersion = version - return pvb -} - -func (pvb pvBuilder) withCapacity(capacity resource.Quantity) pvBuilder { - pvb.PersistentVolume.Spec.Capacity = v1.ResourceList{ 
- v1.ResourceName(v1.ResourceStorage): capacity, - } - return pvb -} - -func (pvb pvBuilder) withPhase(phase v1.PersistentVolumePhase) pvBuilder { - pvb.PersistentVolume.Status = v1.PersistentVolumeStatus{ - Phase: phase, - } - return pvb -} - -type pvcBuilder struct { - *v1.PersistentVolumeClaim -} - -func makePVC(name string, storageClassName string) pvcBuilder { - return pvcBuilder{PersistentVolumeClaim: &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: v1.NamespaceDefault, - }, - Spec: v1.PersistentVolumeClaimSpec{ - StorageClassName: ptr.To(storageClassName), - }, - }} -} - -func (pvcb pvcBuilder) withBoundPV(pvName string) pvcBuilder { - pvcb.PersistentVolumeClaim.Spec.VolumeName = pvName - metav1.SetMetaDataAnnotation(&pvcb.PersistentVolumeClaim.ObjectMeta, volume.AnnBindCompleted, "true") - return pvcb -} - -func (pvcb pvcBuilder) withRequestStorage(request resource.Quantity) pvcBuilder { - pvcb.PersistentVolumeClaim.Spec.Resources = v1.VolumeResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): request, - }, - } - return pvcb -} - -func (pvcb pvcBuilder) withPhase(phase v1.PersistentVolumeClaimPhase) pvcBuilder { - pvcb.PersistentVolumeClaim.Status = v1.PersistentVolumeClaimStatus{ - Phase: phase, - } - return pvcb -} - -type podBuilder struct { - *v1.Pod -} - -func makePod(name string) podBuilder { - pb := podBuilder{Pod: &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: v1.NamespaceDefault, - }, - }} - pb.Pod.Spec.Volumes = make([]v1.Volume, 0) - return pb -} - -func (pb podBuilder) withNodeName(name string) podBuilder { - pb.Pod.Spec.NodeName = name - return pb -} - -func (pb podBuilder) withNamespace(name string) podBuilder { - pb.Pod.ObjectMeta.Namespace = name - return pb -} - -func (pb podBuilder) withPVCVolume(pvcName, name string) podBuilder { - pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{ - Name: name, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - }, - }, - }) - return pb -} - -func (pb podBuilder) withPVCSVolume(pvcs []*v1.PersistentVolumeClaim) podBuilder { - for i, pvc := range pvcs { - pb.withPVCVolume(pvc.Name, fmt.Sprintf("vol%v", i)) - } - return pb -} - -func (pb podBuilder) withEmptyDirVolume() podBuilder { - pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{ - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }) - return pb -} - -func (pb podBuilder) withGenericEphemeralVolume(name string) podBuilder { - pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{ - Name: name, - VolumeSource: v1.VolumeSource{ - Ephemeral: &v1.EphemeralVolumeSource{}, - }, - }) - return pb -} - -func (pb podBuilder) withCSI(driver string) podBuilder { - pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{ - VolumeSource: v1.VolumeSource{ - CSI: &v1.CSIVolumeSource{ - Driver: driver, - }, - }, - }) - return pb -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go deleted file mode 100644 index 16179ec6bb9..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go +++ /dev/null @@ -1,630 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volumebinding
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	v1 "k8s.io/api/core/v1"
-	storagev1 "k8s.io/api/storage/v1"
-	apiequality "k8s.io/apimachinery/pkg/api/equality"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime"
-	corelisters "k8s.io/client-go/listers/core/v1"
-	storagelisters "k8s.io/client-go/listers/storage/v1"
-	"k8s.io/component-helpers/storage/ephemeral"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/apis/config"
-	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/kubernetes/pkg/scheduler/util"
-)
-
-const (
-	stateKey framework.StateKey = Name
-
-	maxUtilization = 100
-)
-
-// the state is initialized in PreFilter phase. Because we save the pointer in
-// framework.CycleState, in the later phases we don't need to call Write method
-// to update the value
-type stateData struct {
-	allBound bool
-	// podVolumesByNode holds the pod's volume information found in the Filter
-	// phase for each node
-	// it's initialized in the PreFilter phase
-	podVolumesByNode map[string]*PodVolumes
-	podVolumeClaims  *PodVolumeClaims
-	// hasStaticBindings declares whether the pod contains one or more StaticBinding.
-	// If not, volumeBinding will skip score extension point.
-	hasStaticBindings bool
-	sync.Mutex
-}
-
-func (d *stateData) Clone() framework.StateData {
-	return d
-}
-
-// VolumeBinding is a plugin that binds pod volumes in scheduling.
-// In the Filter phase, pod binding cache is created for the pod and used in
-// Reserve and PreBind phases.
-type VolumeBinding struct {
-	Binder      SchedulerVolumeBinder
-	PVCLister   corelisters.PersistentVolumeClaimLister
-	classLister storagelisters.StorageClassLister
-	scorer      volumeCapacityScorer
-	fts         feature.Features
-}
-
-var _ framework.PreFilterPlugin = &VolumeBinding{}
-var _ framework.FilterPlugin = &VolumeBinding{}
-var _ framework.ReservePlugin = &VolumeBinding{}
-var _ framework.PreBindPlugin = &VolumeBinding{}
-var _ framework.PreScorePlugin = &VolumeBinding{}
-var _ framework.ScorePlugin = &VolumeBinding{}
-var _ framework.EnqueueExtensions = &VolumeBinding{}
-
-// Name is the name of the plugin used in Registry and configurations.
-const Name = names.VolumeBinding
-
-// Name returns name of the plugin. It is used in logs, etc.
-func (pl *VolumeBinding) Name() string {
-	return Name
-}
-
-// EventsToRegister returns the possible events that may make a Pod
-// failed by this plugin schedulable.
-func (pl *VolumeBinding) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) {
-	// Pods may fail to find available PVs because the node labels do not
-	// match the storage class's allowed topologies or PV's node affinity.
-	// A new or updated node may make pods schedulable.
-	//
-	// A note about UpdateNodeTaint event:
-	// Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin.
-	// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
-	// See: https://github.com/kubernetes/kubernetes/issues/109437
-	nodeActionType := framework.Add | framework.UpdateNodeLabel | framework.UpdateNodeTaint
-	if pl.fts.EnableSchedulingQueueHint {
-		// When scheduling queue hint is enabled, we don't use the problematic preCheck and don't need to register UpdateNodeTaint event.
-		nodeActionType = framework.Add | framework.UpdateNodeLabel
-	}
-	events := []framework.ClusterEventWithHint{
-		// Pods may fail because of missing or mis-configured storage class
-		// (e.g., allowedTopologies, volumeBindingMode), and hence may become
-		// schedulable upon StorageClass Add or Update events.
-		{Event: framework.ClusterEvent{Resource: framework.StorageClass, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterStorageClassChange},
-
-		// We bind PVCs with PVs, so any changes may make the pods schedulable.
-		{Event: framework.ClusterEvent{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterPersistentVolumeClaimChange},
-		{Event: framework.ClusterEvent{Resource: framework.PersistentVolume, ActionType: framework.Add | framework.Update}},
-
-		{Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}},
-
-		// We rely on CSI node to translate in-tree PV to CSI.
-		// TODO: kube-scheduler will unregister the CSINode events once all the volume plugins have completed their CSI migration.
-		{Event: framework.ClusterEvent{Resource: framework.CSINode, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterCSINodeChange},
-
-		// When CSIStorageCapacity is enabled, pods may become schedulable
-		// on CSI driver & storage capacity changes.
-		{Event: framework.ClusterEvent{Resource: framework.CSIDriver, ActionType: framework.Update}, QueueingHintFn: pl.isSchedulableAfterCSIDriverChange},
-		{Event: framework.ClusterEvent{Resource: framework.CSIStorageCapacity, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterCSIStorageCapacityChange},
-	}
-	return events, nil
-}
-
-func (pl *VolumeBinding) isSchedulableAfterCSINodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	if oldObj == nil {
-		logger.V(5).Info("CSINode creation could make the pod schedulable")
-		return framework.Queue, nil
-	}
-	oldCSINode, modifiedCSINode, err := util.As[*storagev1.CSINode](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, err
-	}
-
-	logger = klog.LoggerWithValues(
-		logger,
-		"Pod", klog.KObj(pod),
-		"CSINode", klog.KObj(modifiedCSINode),
-	)
-
-	if oldCSINode.ObjectMeta.Annotations[v1.MigratedPluginsAnnotationKey] != modifiedCSINode.ObjectMeta.Annotations[v1.MigratedPluginsAnnotationKey] {
-		logger.V(5).Info("CSINode's migrated plugins annotation is updated and that may make the pod schedulable")
-		return framework.Queue, nil
-	}
-
-	logger.V(5).Info("CSINode was created or updated but it doesn't make this pod schedulable")
-	return framework.QueueSkip, nil
-}
-
-func (pl *VolumeBinding) isSchedulableAfterPersistentVolumeClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	_, newPVC, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, err
-	}
-
-	logger = klog.LoggerWithValues(
-		logger,
-		"Pod", klog.KObj(pod),
-		"PersistentVolumeClaim", klog.KObj(newPVC),
-	)
-
-	if pod.Namespace != newPVC.Namespace {
-		logger.V(5).Info("PersistentVolumeClaim was created or updated, but it doesn't make this pod schedulable because the PVC belongs to a different namespace")
-		return framework.QueueSkip, nil
-	}
-
-	for _, vol := range pod.Spec.Volumes {
-		var pvcName string
-		switch {
-		case vol.PersistentVolumeClaim != nil:
-			pvcName = vol.PersistentVolumeClaim.ClaimName
-		case vol.Ephemeral != nil:
-			pvcName = ephemeral.VolumeClaimName(pod, &vol)
-		default:
-			continue
-		}
-
-		if pvcName == newPVC.Name {
-			// Return Queue because, in this case,
-			// all PVC creations and almost all PVC updates could make the Pod schedulable.
-			logger.V(5).Info("PersistentVolumeClaim the pod requires was created or updated, potentially making the target Pod schedulable")
-			return framework.Queue, nil
-		}
-	}
-
-	logger.V(5).Info("PersistentVolumeClaim was created or updated, but it doesn't make this pod schedulable")
-	return framework.QueueSkip, nil
-}
-
-// isSchedulableAfterStorageClassChange checks whether a StorageClass event might make a Pod schedulable or not.
-// Any StorageClass addition and a StorageClass update to allowedTopologies
-// might make a Pod schedulable.
-// Note that an update to volume binding mode is not allowed and we don't have to consider it while examining the update event.
-func (pl *VolumeBinding) isSchedulableAfterStorageClassChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	oldSC, newSC, err := util.As[*storagev1.StorageClass](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, err
-	}
-
-	logger = klog.LoggerWithValues(
-		logger,
-		"Pod", klog.KObj(pod),
-		"StorageClass", klog.KObj(newSC),
-	)
-
-	if oldSC == nil {
-		// No further filtering can be made for a creation event,
-		// and we just always return Queue.
-		logger.V(5).Info("A new StorageClass was created, which could make a Pod schedulable")
-		return framework.Queue, nil
-	}
-
-	if !apiequality.Semantic.DeepEqual(newSC.AllowedTopologies, oldSC.AllowedTopologies) {
-		logger.V(5).Info("StorageClass got an update in AllowedTopologies", "AllowedTopologies", newSC.AllowedTopologies)
-		return framework.Queue, nil
-	}
-
-	logger.V(5).Info("StorageClass was updated, but it doesn't make this pod schedulable")
-	return framework.QueueSkip, nil
-}
-
-// isSchedulableAfterCSIStorageCapacityChange checks whether a CSIStorageCapacity event
-// might make a Pod schedulable or not.
-// Any CSIStorageCapacity addition and a CSIStorageCapacity update to volume limit
-// (calculated based on capacity and maximumVolumeSize) might make a Pod schedulable.
-// Note that an update to nodeTopology and storageClassName is not allowed and
-// we don't have to consider it while examining the update event.
-func (pl *VolumeBinding) isSchedulableAfterCSIStorageCapacityChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	oldCap, newCap, err := util.As[*storagev1.CSIStorageCapacity](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, err
-	}
-
-	if oldCap == nil {
-		logger.V(5).Info(
-			"A new CSIStorageCapacity was created, which could make a Pod schedulable",
-			"Pod", klog.KObj(pod),
-			"CSIStorageCapacity", klog.KObj(newCap),
-		)
-		return framework.Queue, nil
-	}
-
-	oldLimit := volumeLimit(oldCap)
-	newLimit := volumeLimit(newCap)
-
-	logger = klog.LoggerWithValues(
-		logger,
-		"Pod", klog.KObj(pod),
-		"CSIStorageCapacity", klog.KObj(newCap),
-		"volumeLimit(new)", newLimit,
-		"volumeLimit(old)", oldLimit,
-	)
-
-	if newLimit != nil && (oldLimit == nil || newLimit.Value() > oldLimit.Value()) {
-		logger.V(5).Info("VolumeLimit was increased, which could make a Pod schedulable")
-		return framework.Queue, nil
-	}
-
-	logger.V(5).Info("CSIStorageCapacity was updated, but it doesn't make this pod schedulable")
-	return framework.QueueSkip, nil
-}
-
-func (pl *VolumeBinding) isSchedulableAfterCSIDriverChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	originalCSIDriver, modifiedCSIDriver, err := util.As[*storagev1.CSIDriver](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, err
-	}
-
-	logger = klog.LoggerWithValues(
-		logger,
-		"Pod", klog.KObj(pod),
-		"CSIDriver", klog.KObj(modifiedCSIDriver),
-	)
-
-	for _, vol := range pod.Spec.Volumes {
-		if vol.CSI == nil || vol.CSI.Driver != modifiedCSIDriver.Name {
-			continue
-		}
-		if (originalCSIDriver.Spec.StorageCapacity != nil && *originalCSIDriver.Spec.StorageCapacity) &&
-			(modifiedCSIDriver.Spec.StorageCapacity == nil || !*modifiedCSIDriver.Spec.StorageCapacity) {
-			logger.V(5).Info("CSIDriver was updated and storage capacity got disabled, which may make the pod schedulable")
-			return framework.Queue, nil
-		}
-	}
-
-	logger.V(5).Info("CSIDriver was created or updated but it doesn't make this pod schedulable")
-	return framework.QueueSkip, nil
-}
-
-// podHasPVCs returns 2 values:
-// - the first one to denote if the given "pod" has any PVC defined.
-// - the second one to return any error if the requested PVC is illegal.
-func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) {
-	hasPVC := false
-	for _, vol := range pod.Spec.Volumes {
-		var pvcName string
-		isEphemeral := false
-		switch {
-		case vol.PersistentVolumeClaim != nil:
-			pvcName = vol.PersistentVolumeClaim.ClaimName
-		case vol.Ephemeral != nil:
-			pvcName = ephemeral.VolumeClaimName(pod, &vol)
-			isEphemeral = true
-		default:
-			// Volume is not using a PVC, ignore
-			continue
-		}
-		hasPVC = true
-		pvc, err := pl.PVCLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
-		if err != nil {
-			// The error usually already has enough context ("persistentvolumeclaim "myclaim" not found"),
-			// but we can do better for generic ephemeral inline volumes where that situation
-			// is normal directly after creating a pod.
-			if isEphemeral && apierrors.IsNotFound(err) {
-				err = fmt.Errorf("waiting for ephemeral volume controller to create the persistentvolumeclaim %q", pvcName)
-			}
-			return hasPVC, err
-		}
-
-		if pvc.Status.Phase == v1.ClaimLost {
-			return hasPVC, fmt.Errorf("persistentvolumeclaim %q bound to non-existent persistentvolume %q", pvc.Name, pvc.Spec.VolumeName)
-		}
-
-		if pvc.DeletionTimestamp != nil {
-			return hasPVC, fmt.Errorf("persistentvolumeclaim %q is being deleted", pvc.Name)
-		}
-
-		if isEphemeral {
-			if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
-				return hasPVC, err
-			}
-		}
-	}
-	return hasPVC, nil
-}
-
-// PreFilter invoked at the prefilter extension point to check if pod has all
-// immediate PVCs bound. If not all immediate PVCs are bound, an
-// UnschedulableAndUnresolvable is returned.
-func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
-	logger := klog.FromContext(ctx)
-	// If pod does not reference any PVC, we don't need to do anything.
-	if hasPVC, err := pl.podHasPVCs(pod); err != nil {
-		return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error())
-	} else if !hasPVC {
-		state.Write(stateKey, &stateData{})
-		return nil, framework.NewStatus(framework.Skip)
-	}
-	podVolumeClaims, err := pl.Binder.GetPodVolumeClaims(logger, pod)
-	if err != nil {
-		return nil, framework.AsStatus(err)
-	}
-	if len(podVolumeClaims.unboundClaimsImmediate) > 0 {
-		// Return UnschedulableAndUnresolvable error if immediate claims are
-		// not bound. Pod will be moved to active/backoff queues once these
-		// claims are bound by PV controller.
-		status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
-		status.AppendReason("pod has unbound immediate PersistentVolumeClaims")
-		return nil, status
-	}
-	state.Write(stateKey, &stateData{
-		podVolumesByNode: make(map[string]*PodVolumes),
-		podVolumeClaims: &PodVolumeClaims{
-			boundClaims:                podVolumeClaims.boundClaims,
-			unboundClaimsDelayBinding:  podVolumeClaims.unboundClaimsDelayBinding,
-			unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding,
-		},
-	})
-	return nil, nil
-}
-
-// PreFilterExtensions returns prefilter extensions, pod add and remove.
-func (pl *VolumeBinding) PreFilterExtensions() framework.PreFilterExtensions {
-	return nil
-}
-
-func getStateData(cs *framework.CycleState) (*stateData, error) {
-	state, err := cs.Read(stateKey)
-	if err != nil {
-		return nil, err
-	}
-	s, ok := state.(*stateData)
-	if !ok {
-		return nil, errors.New("unable to convert state into stateData")
-	}
-	return s, nil
-}
-
-// Filter invoked at the filter extension point.
-// It evaluates if a pod can fit due to the volumes it requests,
-// for both bound and unbound PVCs.
-//
-// For PVCs that are bound, then it checks that the corresponding PV's node affinity is
-// satisfied by the given node.
-//
-// For PVCs that are unbound, it tries to find available PVs that can satisfy the PVC requirements
-// and that the PV node affinity is satisfied by the given node.
-//
-// If storage capacity tracking is enabled, then enough space has to be available
-// for the node and volumes that still need to be created.
-//
-// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
-// PVCs can be matched with an available and node-compatible PV.
-func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
-	logger := klog.FromContext(ctx)
-	node := nodeInfo.Node()
-
-	state, err := getStateData(cs)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-
-	podVolumes, reasons, err := pl.Binder.FindPodVolumes(logger, pod, state.podVolumeClaims, node)
-
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-
-	if len(reasons) > 0 {
-		status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
-		for _, reason := range reasons {
-			status.AppendReason(string(reason))
-		}
-		return status
-	}
-
-	// multiple goroutines call `Filter` on different nodes simultaneously and the `CycleState` may be duplicated, so we must use a local lock here
-	state.Lock()
-	state.podVolumesByNode[node.Name] = podVolumes
-	state.hasStaticBindings = state.hasStaticBindings || (podVolumes != nil && len(podVolumes.StaticBindings) > 0)
-	state.Unlock()
-	return nil
-}
-
-// PreScore invoked at the preScore extension point. It checks whether volumeBinding can skip Score
-func (pl *VolumeBinding) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
-	if pl.scorer == nil {
-		return framework.NewStatus(framework.Skip)
-	}
-	state, err := getStateData(cs)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	if state.hasStaticBindings || pl.fts.EnableStorageCapacityScoring {
-		return nil
-	}
-	return framework.NewStatus(framework.Skip)
-}
-
-// Score invoked at the score extension point.
-func (pl *VolumeBinding) Score(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
-	if pl.scorer == nil {
-		return 0, nil
-	}
-	state, err := getStateData(cs)
-	if err != nil {
-		return 0, framework.AsStatus(err)
-	}
-	nodeName := nodeInfo.Node().Name
-	podVolumes, ok := state.podVolumesByNode[nodeName]
-	if !ok {
-		return 0, nil
-	}
-
-	classResources := make(classResourceMap)
-	if len(podVolumes.StaticBindings) != 0 || !pl.fts.EnableStorageCapacityScoring {
-		// group static binding volumes by storage class
-		for _, staticBinding := range podVolumes.StaticBindings {
-			class := staticBinding.StorageClassName()
-			storageResource := staticBinding.StorageResource()
-			if _, ok := classResources[class]; !ok {
-				classResources[class] = &StorageResource{
-					Requested: 0,
-					Capacity:  0,
-				}
-			}
-			classResources[class].Requested += storageResource.Requested
-			classResources[class].Capacity += storageResource.Capacity
-		}
-	} else {
-		// group dynamic binding volumes by storage class
-		for _, provision := range podVolumes.DynamicProvisions {
-			if provision.NodeCapacity == nil {
-				continue
-			}
-			class := *provision.PVC.Spec.StorageClassName
-			if _, ok := classResources[class]; !ok {
-				classResources[class] = &StorageResource{
-					Requested: 0,
-					Capacity:  0,
-				}
-			}
-			// The following line cannot be +=. For example, if a Pod requests two 50GB volumes from
-			// a StorageClass with 100GB of capacity on a node, this part of the code will be executed twice.
-			// In that case, using += would incorrectly set classResources[class].Capacity to 200GB.
-			classResources[class].Capacity = provision.NodeCapacity.Capacity.Value()
-			requestedQty := provision.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-			classResources[class].Requested += requestedQty.Value()
-		}
-	}
-
-	return pl.scorer(classResources), nil
-}
-
-// ScoreExtensions of the Score plugin.
-func (pl *VolumeBinding) ScoreExtensions() framework.ScoreExtensions {
-	return nil
-}
-
-// Reserve reserves volumes of pod and saves binding status in cycle state.
-func (pl *VolumeBinding) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
-	state, err := getStateData(cs)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	// we don't need to hold the lock as only one node will be reserved for the given pod
-	podVolumes, ok := state.podVolumesByNode[nodeName]
-	if ok {
-		allBound, err := pl.Binder.AssumePodVolumes(klog.FromContext(ctx), pod, nodeName, podVolumes)
-		if err != nil {
-			return framework.AsStatus(err)
-		}
-		state.allBound = allBound
-	} else {
-		// may not exist if the pod does not reference any PVC
-		state.allBound = true
-	}
-	return nil
-}
-
-// PreBind will make the API update with the assumed bindings and wait until
-// the PV controller has completely finished the binding operation.
-//
-// If binding errors, times out or gets undone, then an error will be returned to
-// retry scheduling.
-func (pl *VolumeBinding) PreBind(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
-	s, err := getStateData(cs)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	if s.allBound {
-		// no need to bind volumes
-		return nil
-	}
-	// we don't need to hold the lock as only one node will be pre-bound for the given pod
-	podVolumes, ok := s.podVolumesByNode[nodeName]
-	if !ok {
-		return framework.AsStatus(fmt.Errorf("no pod volumes found for node %q", nodeName))
-	}
-	logger := klog.FromContext(ctx)
-	logger.V(5).Info("Trying to bind volumes for pod", "pod", klog.KObj(pod))
-	err = pl.Binder.BindPodVolumes(ctx, pod, podVolumes)
-	if err != nil {
-		logger.V(5).Info("Failed to bind volumes for pod", "pod", klog.KObj(pod), "err", err)
-		return framework.AsStatus(err)
-	}
-	logger.V(5).Info("Success binding volumes for pod", "pod", klog.KObj(pod))
-	return nil
-}
-
-// Unreserve clears assumed PV and PVC cache.
-// It's idempotent, and does nothing if no cache found for the given pod.
-func (pl *VolumeBinding) Unreserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) {
-	s, err := getStateData(cs)
-	if err != nil {
-		return
-	}
-	// we don't need to hold the lock as only one node may be unreserved
-	podVolumes, ok := s.podVolumesByNode[nodeName]
-	if !ok {
-		return
-	}
-	pl.Binder.RevertAssumedPodVolumes(podVolumes)
-}
-
-// New initializes a new plugin and returns it.
-func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) {
-	args, ok := plArgs.(*config.VolumeBindingArgs)
-	if !ok {
-		return nil, fmt.Errorf("want args to be of type VolumeBindingArgs, got %T", plArgs)
-	}
-	if err := validation.ValidateVolumeBindingArgsWithOptions(nil, args, validation.VolumeBindingArgsValidationOptions{
-		AllowStorageCapacityScoring: fts.EnableStorageCapacityScoring,
-	}); err != nil {
-		return nil, err
-	}
-	podInformer := fh.SharedInformerFactory().Core().V1().Pods()
-	nodeInformer := fh.SharedInformerFactory().Core().V1().Nodes()
-	pvcInformer := fh.SharedInformerFactory().Core().V1().PersistentVolumeClaims()
-	pvInformer := fh.SharedInformerFactory().Core().V1().PersistentVolumes()
-	storageClassInformer := fh.SharedInformerFactory().Storage().V1().StorageClasses()
-	csiNodeInformer := fh.SharedInformerFactory().Storage().V1().CSINodes()
-	capacityCheck := CapacityCheck{
-		CSIDriverInformer:          fh.SharedInformerFactory().Storage().V1().CSIDrivers(),
-		CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1().CSIStorageCapacities(),
-	}
-	binder := NewVolumeBinder(klog.FromContext(ctx), fh.ClientSet(), fts, podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second)
-
-	// build score function
-	var scorer volumeCapacityScorer
-	if fts.EnableStorageCapacityScoring {
-		shape := make(helper.FunctionShape, 0, len(args.Shape))
-		for _, point := range args.Shape {
-			shape = append(shape, helper.FunctionShapePoint{
-				Utilization: int64(point.Utilization),
-				Score:       int64(point.Score) * (framework.MaxNodeScore / config.MaxCustomPriorityScore),
-			})
-		}
-		scorer = buildScorerFunction(shape)
-	}
-	return &VolumeBinding{
-		Binder:      binder,
-		PVCLister:   pvcInformer.Lister(),
-		classLister: storageClassInformer.Lister(),
-		scorer:      scorer,
-		fts:         fts,
-	}, nil
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/OWNERS
deleted file mode 100644
index be3245e85e1..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/OWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - sig-storage-approvers
-  - cofyc
-reviewers:
-  - sig-storage-reviewers
-  - cofyc
-labels:
-  - sig/storage
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go
deleted file mode 100644
index 6b22340b4ab..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volumerestrictions
-
-import (
-	"context"
-	"fmt"
-
-	v1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	corelisters "k8s.io/client-go/listers/core/v1"
-	"k8s.io/klog/v2"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/kubernetes/pkg/scheduler/util"
-)
-
-// VolumeRestrictions is a plugin that checks volume restrictions.
-type VolumeRestrictions struct {
-	pvcLister                 corelisters.PersistentVolumeClaimLister
-	sharedLister              framework.SharedLister
-	enableSchedulingQueueHint bool
-}
-
-var _ framework.PreFilterPlugin = &VolumeRestrictions{}
-var _ framework.FilterPlugin = &VolumeRestrictions{}
-var _ framework.EnqueueExtensions = &VolumeRestrictions{}
-var _ framework.StateData = &preFilterState{}
-
-const (
-	// Name is the name of the plugin used in the plugin registry and configurations.
-	Name = names.VolumeRestrictions
-	// preFilterStateKey is the key in CycleState to VolumeRestrictions pre-computed data for Filtering.
-	// Using the name of the plugin will likely help us avoid collisions with other plugins.
-	preFilterStateKey = "PreFilter" + Name
-
-	// ErrReasonDiskConflict is used for NoDiskConflict predicate error.
-	ErrReasonDiskConflict = "node(s) had no available disk"
-	// ErrReasonReadWriteOncePodConflict is used when a pod is found using the same PVC with the ReadWriteOncePod access mode.
-	ErrReasonReadWriteOncePodConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode"
-)
-
-// preFilterState computed at PreFilter and used at Filter.
-type preFilterState struct {
-	// Names of the pod's volumes using the ReadWriteOncePod access mode.
-	readWriteOncePodPVCs sets.Set[string]
-	// The number of references to these ReadWriteOncePod volumes by scheduled pods.
-	conflictingPVCRefCount int
-}
-
-func (s *preFilterState) updateWithPod(podInfo *framework.PodInfo, multiplier int) {
-	s.conflictingPVCRefCount += multiplier * s.conflictingPVCRefCountForPod(podInfo)
-}
-
-func (s *preFilterState) conflictingPVCRefCountForPod(podInfo *framework.PodInfo) int {
-	conflicts := 0
-	for _, volume := range podInfo.Pod.Spec.Volumes {
-		if volume.PersistentVolumeClaim == nil {
-			continue
-		}
-		if s.readWriteOncePodPVCs.Has(volume.PersistentVolumeClaim.ClaimName) {
-			conflicts += 1
-		}
-	}
-	return conflicts
-}
-
-// Clone the prefilter state.
-func (s *preFilterState) Clone() framework.StateData {
-	if s == nil {
-		return nil
-	}
-	return &preFilterState{
-		readWriteOncePodPVCs:   s.readWriteOncePodPVCs,
-		conflictingPVCRefCount: s.conflictingPVCRefCount,
-	}
-}
-
-// Name returns name of the plugin. It is used in logs, etc.
-func (pl *VolumeRestrictions) Name() string {
-	return Name
-}
-
-func isVolumeConflict(volume *v1.Volume, pod *v1.Pod) bool {
-	for _, existingVolume := range pod.Spec.Volumes {
-		// Same GCE disk mounted by multiple pods conflicts unless all pods mount it read-only.
-		if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil {
-			disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk
-			if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) {
-				return true
-			}
-		}
-
-		if volume.AWSElasticBlockStore != nil && existingVolume.AWSElasticBlockStore != nil {
-			if volume.AWSElasticBlockStore.VolumeID == existingVolume.AWSElasticBlockStore.VolumeID {
-				return true
-			}
-		}
-
-		if volume.ISCSI != nil && existingVolume.ISCSI != nil {
-			iqn := volume.ISCSI.IQN
-			eiqn := existingVolume.ISCSI.IQN
-			// two ISCSI volumes are the same if they share the same IQN. As ISCSI volumes are of type
-			// RWO or ROX, we could permit only one RW mount. The same ISCSI volume mounted by multiple Pods
-			// conflicts unless all other pods mount it read-only.
-			if iqn == eiqn && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) {
-				return true
-			}
-		}
-
-		if volume.RBD != nil && existingVolume.RBD != nil {
-			mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
-			emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
-			// two RBD images are the same if they share the same Ceph monitor, are in the same RADOS pool, and have the same image name.
-			// only one read-write mount is permitted for the same RBD image.
-			// the same RBD image mounted by multiple Pods conflicts unless all Pods mount the image read-only.
-			if haveOverlap(mon, emon) && pool == epool && image == eimage && !(volume.RBD.ReadOnly && existingVolume.RBD.ReadOnly) {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// haveOverlap searches two arrays and returns true if they have at least one common element; returns false otherwise.
-func haveOverlap(a1, a2 []string) bool {
-	if len(a1) > len(a2) {
-		a1, a2 = a2, a1
-	}
-	m := sets.New(a1...)
-	for _, val := range a2 {
-		if _, ok := m[val]; ok {
-			return true
-		}
-	}
-
-	return false
-}
-
-// return true if there are conflict checking targets.
-func needsRestrictionsCheck(v v1.Volume) bool {
-	return v.GCEPersistentDisk != nil || v.AWSElasticBlockStore != nil || v.RBD != nil || v.ISCSI != nil
-}
-
-// PreFilter computes and stores cycleState containing details for enforcing ReadWriteOncePod.
-func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
-	needsCheck := false
-	for i := range pod.Spec.Volumes {
-		if needsRestrictionsCheck(pod.Spec.Volumes[i]) {
-			needsCheck = true
-			break
-		}
-	}
-
-	pvcs, err := pl.readWriteOncePodPVCsForPod(ctx, pod)
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error())
-		}
-		return nil, framework.AsStatus(err)
-	}
-
-	s, err := pl.calPreFilterState(ctx, pod, pvcs)
-	if err != nil {
-		return nil, framework.AsStatus(err)
-	}
-
-	if !needsCheck && s.conflictingPVCRefCount == 0 {
-		return nil, framework.NewStatus(framework.Skip)
-	}
-	cycleState.Write(preFilterStateKey, s)
-	return nil, nil
-}
-
-// AddPod from pre-computed data in cycleState.
-func (pl *VolumeRestrictions) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
-	state, err := getPreFilterState(cycleState)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	state.updateWithPod(podInfoToAdd, 1)
-	return nil
-}
-
-// RemovePod from pre-computed data in cycleState.
-func (pl *VolumeRestrictions) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
-	state, err := getPreFilterState(cycleState)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	state.updateWithPod(podInfoToRemove, -1)
-	return nil
-}
-
-func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
-	c, err := cycleState.Read(preFilterStateKey)
-	if err != nil {
-		// preFilterState doesn't exist, likely PreFilter wasn't invoked.
-		return nil, fmt.Errorf("cannot read %q from cycleState", preFilterStateKey)
-	}
-
-	s, ok := c.(*preFilterState)
-	if !ok {
-		return nil, fmt.Errorf("%+v convert to volumerestrictions.state error", c)
-	}
-	return s, nil
-}
-
-// calPreFilterState computes preFilterState describing which PVCs use ReadWriteOncePod
-// and which pods in the cluster are in conflict.
-func (pl *VolumeRestrictions) calPreFilterState(ctx context.Context, pod *v1.Pod, pvcs sets.Set[string]) (*preFilterState, error) {
-	conflictingPVCRefCount := 0
-	for pvc := range pvcs {
-		key := framework.GetNamespacedName(pod.Namespace, pvc)
-		if pl.sharedLister.StorageInfos().IsPVCUsedByPods(key) {
-			// There can only be at most one pod using the ReadWriteOncePod PVC.
-			conflictingPVCRefCount += 1
-		}
-	}
-	return &preFilterState{
-		readWriteOncePodPVCs:   pvcs,
-		conflictingPVCRefCount: conflictingPVCRefCount,
-	}, nil
-}
-
-func (pl *VolumeRestrictions) readWriteOncePodPVCsForPod(ctx context.Context, pod *v1.Pod) (sets.Set[string], error) {
-	pvcs := sets.New[string]()
-	for _, volume := range pod.Spec.Volumes {
-		if volume.PersistentVolumeClaim == nil {
-			continue
-		}
-
-		pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName)
-		if err != nil {
-			return nil, err
-		}
-
-		if !v1helper.ContainsAccessMode(pvc.Spec.AccessModes, v1.ReadWriteOncePod) {
-			continue
-		}
-		pvcs.Insert(pvc.Name)
-	}
-	return pvcs, nil
-}
-
-// Checks if scheduling the pod onto this node would cause any conflicts with
-// existing volumes.
-func satisfyVolumeConflicts(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool {
-	for i := range pod.Spec.Volumes {
-		v := pod.Spec.Volumes[i]
-		if !needsRestrictionsCheck(v) {
-			continue
-		}
-		for _, ev := range nodeInfo.Pods {
-			if isVolumeConflict(&v, ev.Pod) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// Checks if scheduling the pod would cause any ReadWriteOncePod PVC access mode conflicts.
-func satisfyReadWriteOncePod(ctx context.Context, state *preFilterState) *framework.Status {
-	if state == nil {
-		return nil
-	}
-	if state.conflictingPVCRefCount > 0 {
-		return framework.NewStatus(framework.Unschedulable, ErrReasonReadWriteOncePodConflict)
-	}
-	return nil
-}
-
-// PreFilterExtensions returns prefilter extensions, pod add and remove.
-func (pl *VolumeRestrictions) PreFilterExtensions() framework.PreFilterExtensions {
-	return pl
-}
-
-// Filter invoked at the filter extension point.
-// It evaluates if a pod can fit due to the volumes it requests, and those that
-// are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume
-// can't be scheduled there.
-// This is GCE, Amazon EBS, ISCSI and Ceph RBD specific for now:
-// - GCE PD allows multiple mounts as long as they're all read-only
-// - AWS EBS forbids any two pods mounting the same volume ID
-// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image, and the image is read-only
-// - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only
-// If the pod uses PVCs with the ReadWriteOncePod access mode, it evaluates if
-// these PVCs are already in-use and if preemption will help.
-func (pl *VolumeRestrictions) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
-	if !satisfyVolumeConflicts(pod, nodeInfo) {
-		return framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
-	}
-	state, err := getPreFilterState(cycleState)
-	if err != nil {
-		return framework.AsStatus(err)
-	}
-	return satisfyReadWriteOncePod(ctx, state)
-}
-
-// EventsToRegister returns the possible events that may make a Pod
-// failed by this plugin schedulable.
-func (pl *VolumeRestrictions) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) {
-	// A note about UpdateNodeTaint/UpdateNodeLabel event:
-	// Ideally, it's supposed to register only Add because any Node update event will never change the result from this plugin.
-	// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
-	// See: https://github.com/kubernetes/kubernetes/issues/109437
-	nodeActionType := framework.Add | framework.UpdateNodeTaint | framework.UpdateNodeLabel
-	if pl.enableSchedulingQueueHint {
-		// preCheck is not used when QHint is enabled, and hence Update event isn't necessary.
-		nodeActionType = framework.Add
-	}
-
-	return []framework.ClusterEventWithHint{
-		// Pods may fail to schedule because of volumes conflicting with other pods on same node.
-		// Once running pods are deleted and volumes have been released, the unschedulable pod will be schedulable.
-		// Due to immutable fields `spec.volumes`, pod update events are ignored.
-		{Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}, QueueingHintFn: pl.isSchedulableAfterPodDeleted},
-		// A new Node may make a pod schedulable.
-		// We intentionally don't set QueueingHint since all Node/Add events could make Pods schedulable.
-		{Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}},
-		// Pods may fail to schedule because the PVC it uses has not yet been created.
-		// This PVC is required to exist to check its access modes.
-		{Event: framework.ClusterEvent{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add},
-			QueueingHintFn: pl.isSchedulableAfterPersistentVolumeClaimAdded},
-	}, nil
-}
-
-// isSchedulableAfterPersistentVolumeClaimAdded is invoked whenever a PersistentVolumeClaim is added or changed. It checks whether
-// that change made a previously unschedulable pod schedulable.
-func (pl *VolumeRestrictions) isSchedulableAfterPersistentVolumeClaimAdded(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	_, newPersistentVolumeClaim, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPersistentVolumeClaimChange: %w", err)
-	}
-
-	if newPersistentVolumeClaim.Namespace != pod.Namespace {
-		return framework.QueueSkip, nil
-	}
-
-	for _, volume := range pod.Spec.Volumes {
-		if volume.PersistentVolumeClaim == nil {
-			continue
-		}
-
-		if volume.PersistentVolumeClaim.ClaimName == newPersistentVolumeClaim.Name {
-			logger.V(5).Info("PVC that is referred from the pod was created, which might make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(newPersistentVolumeClaim))
-			return framework.Queue, nil
-		}
-	}
-	logger.V(5).Info("PVC irrelevant to the Pod was created, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(newPersistentVolumeClaim))
-	return framework.QueueSkip, nil
-}
-
-// isSchedulableAfterPodDeleted is invoked whenever a pod is deleted.
-// It checks whether the deleted pod will conflict with volumes of other pods on the same node.
-func (pl *VolumeRestrictions) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	deletedPod, _, err := util.As[*v1.Pod](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPodDeleted: %w", err)
-	}
-
-	if deletedPod.Namespace != pod.Namespace {
-		return framework.QueueSkip, nil
-	}
-
-	nodeInfo := framework.NewNodeInfo(deletedPod)
-	if !satisfyVolumeConflicts(pod, nodeInfo) {
-		logger.V(5).Info("Pod with the volume that the target pod requires was deleted, which might make this pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
-		return framework.Queue, nil
-	}
-
-	// Return Queue if a deleted pod uses the same PVC since the pod may be unschedulable due to the ReadWriteOncePod access mode of the PVC.
-	//
-	// For now, we don't actually fetch PVC and check the access mode because that operation could be expensive.
-	// Once the observability around QHint is established,
-	// we may want to do that depending on how much the operation would impact the QHint latency negatively.
-	// https://github.com/kubernetes/kubernetes/issues/124566
-	claims := sets.New[string]()
-	for _, volume := range pod.Spec.Volumes {
-		if volume.PersistentVolumeClaim != nil {
-			claims.Insert(volume.PersistentVolumeClaim.ClaimName)
-		}
-	}
-	for _, volume := range deletedPod.Spec.Volumes {
-		if volume.PersistentVolumeClaim != nil && claims.Has(volume.PersistentVolumeClaim.ClaimName) {
-			logger.V(5).Info("Pod with the same PVC that the target pod requires was deleted, which might make this pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
-			return framework.Queue, nil
-		}
-	}
-
-	logger.V(5).Info("An irrelevant Pod was deleted, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
-	return framework.QueueSkip, nil
-}
-
-// New initializes a new plugin and returns it.
-func New(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
-	informerFactory := handle.SharedInformerFactory()
-	pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
-	sharedLister := handle.SnapshotSharedLister()
-
-	return &VolumeRestrictions{
-		pvcLister:                 pvcLister,
-		sharedLister:              sharedLister,
-		enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
-	}, nil
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/OWNERS
deleted file mode 100644
index be3245e85e1..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/OWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - sig-storage-approvers
-  - cofyc
-reviewers:
-  - sig-storage-reviewers
-  - cofyc
-labels:
-  - sig/storage
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go
deleted file mode 100644
index ec39f2ef7ca..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volumezone
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"reflect"
-
-	v1 "k8s.io/api/core/v1"
-	storage "k8s.io/api/storage/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	corelisters "k8s.io/client-go/listers/core/v1"
-	storagelisters "k8s.io/client-go/listers/storage/v1"
-	volumehelpers "k8s.io/cloud-provider/volume/helpers"
-	storagehelpers "k8s.io/component-helpers/storage/volume"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/kubernetes/pkg/scheduler/util"
-)
-
-// VolumeZone is a plugin that checks volume zone.
-type VolumeZone struct {
-	pvLister                  corelisters.PersistentVolumeLister
-	pvcLister                 corelisters.PersistentVolumeClaimLister
-	scLister                  storagelisters.StorageClassLister
-	enableSchedulingQueueHint bool
-}
-
-var _ framework.FilterPlugin = &VolumeZone{}
-var _ framework.PreFilterPlugin = &VolumeZone{}
-var _ framework.EnqueueExtensions = &VolumeZone{}
-
-const (
-	// Name is the name of the plugin used in the plugin registry and configurations.
-	Name = names.VolumeZone
-
-	preFilterStateKey framework.StateKey = "PreFilter" + Name
-
-	// ErrReasonConflict is used for NoVolumeZoneConflict predicate error.
-	ErrReasonConflict = "node(s) had no available volume zone"
-)
-
-// pvTopology holds the value of a pv's topologyLabel
-type pvTopology struct {
-	pvName string
-	key    string
-	values sets.Set[string]
-}
-
-// the state is initialized in PreFilter phase. Because we save the pointer in
-// framework.CycleState, in the later phases we don't need to call Write method
-// to update the value
-type stateData struct {
-	// podPVTopologies holds the pv information we need
-	// it's initialized in the PreFilter phase
-	podPVTopologies []pvTopology
-}
-
-func (d *stateData) Clone() framework.StateData {
-	return d
-}
-
-var topologyLabels = []string{
-	v1.LabelFailureDomainBetaZone,
-	v1.LabelFailureDomainBetaRegion,
-	v1.LabelTopologyZone,
-	v1.LabelTopologyRegion,
-}
-
-func translateToGALabel(label string) string {
-	if label == v1.LabelFailureDomainBetaRegion {
-		return v1.LabelTopologyRegion
-	}
-	if label == v1.LabelFailureDomainBetaZone {
-		return v1.LabelTopologyZone
-	}
-	return label
-}
-
-// Name returns name of the plugin. It is used in logs, etc.
-func (pl *VolumeZone) Name() string {
-	return Name
-}
-
-// PreFilter invoked at the prefilter extension point
-//
-// # It finds the topology of the PersistentVolumes corresponding to the volumes a pod requests
-//
-// Currently, this is only supported with PersistentVolumeClaims,
-// and only looks for the bound PersistentVolume.
-func (pl *VolumeZone) PreFilter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
-	logger := klog.FromContext(ctx)
-	podPVTopologies, status := pl.getPVbyPod(logger, pod)
-	if !status.IsSuccess() {
-		return nil, status
-	}
-	if len(podPVTopologies) == 0 {
-		return nil, framework.NewStatus(framework.Skip)
-	}
-	cs.Write(preFilterStateKey, &stateData{podPVTopologies: podPVTopologies})
-	return nil, nil
-}
-
-// getPVbyPod gets PVTopology from pod
-func (pl *VolumeZone) getPVbyPod(logger klog.Logger, pod *v1.Pod) ([]pvTopology, *framework.Status) {
-	podPVTopologies := make([]pvTopology, 0)
-
-	pvcNames := pl.getPersistentVolumeClaimNameFromPod(pod)
-	for _, pvcName := range pvcNames {
-		if pvcName == "" {
-			return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no name")
-		}
-		pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
-		if s := getErrorAsStatus(err); !s.IsSuccess() {
-			return nil, s
-		}
-
-		pvName := pvc.Spec.VolumeName
-		if pvName == "" {
-			scName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
-			if len(scName) == 0 {
-				return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no pv name and storageClass name")
-			}
-
-			class, err := pl.scLister.Get(scName)
-			if s := getErrorAsStatus(err); !s.IsSuccess() {
-				return nil, s
-			}
-			if class.VolumeBindingMode == nil {
-				return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("VolumeBindingMode not set for StorageClass %q", scName))
-			}
-			if *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer {
-				// Skip unbound volumes
-				continue
-			}
-
-			return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "PersistentVolume had no name")
-		}
-
-		pv, err := pl.pvLister.Get(pvName)
-		if s := getErrorAsStatus(err); !s.IsSuccess() {
-			return nil, s
-		}
-		podPVTopologies = append(podPVTopologies, pl.getPVTopologies(logger, pv)...)
-	}
-	return podPVTopologies, nil
-}
-
-// PreFilterExtensions returns prefilter extensions, pod add and remove.
-func (pl *VolumeZone) PreFilterExtensions() framework.PreFilterExtensions {
-	return nil
-}
-
-// Filter invoked at the filter extension point.
-//
-// It evaluates if a pod can fit due to the volumes it requests, given
-// that some volumes may have zone scheduling constraints. The requirement is that any
-// volume zone-labels must match the equivalent zone-labels on the node. It is OK for
-// the node to have more zone-label constraints (for example, a hypothetical replicated
-// volume might allow region-wide access)
-//
-// Currently this is only supported with PersistentVolumeClaims, and looks to the labels
-// only on the bound PersistentVolume.
-//
-// Working with volumes declared inline in the pod specification (i.e. not
-// using a PersistentVolume) is likely to be harder, as it would require
-// determining the zone of a volume during scheduling, and that is likely to
-// require calling out to the cloud provider. It seems that we are moving away
-// from inline volume declarations anyway.
-func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
-	logger := klog.FromContext(ctx)
-	// If a pod doesn't have any volume attached to it, the predicate will always be true.
-	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
-	if len(pod.Spec.Volumes) == 0 {
-		return nil
-	}
-	var podPVTopologies []pvTopology
-	state, err := getStateData(cs)
-	if err != nil {
-		// Fallback to calculate pv list here
-		var status *framework.Status
-		podPVTopologies, status = pl.getPVbyPod(logger, pod)
-		if !status.IsSuccess() {
-			return status
-		}
-	} else {
-		podPVTopologies = state.podPVTopologies
-	}
-
-	node := nodeInfo.Node()
-	hasAnyNodeConstraint := false
-	for _, topologyLabel := range topologyLabels {
-		if _, ok := node.Labels[topologyLabel]; ok {
-			hasAnyNodeConstraint = true
-			break
-		}
-	}
-
-	if !hasAnyNodeConstraint {
-		// The node has no zone constraints, so we're OK to schedule.
-		// This is to handle a single-zone cluster scenario where the node may not have any topology labels.
-		return nil
-	}
-
-	for _, pvTopology := range podPVTopologies {
-		v, ok := node.Labels[pvTopology.key]
-		if !ok {
-			// if we can't match the beta label, try to match pv's beta label with node's ga label
-			v, ok = node.Labels[translateToGALabel(pvTopology.key)]
-		}
-		if !ok || !pvTopology.values.Has(v) {
-			logger.V(10).Info("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvTopology.pvName), "PVLabelKey", pvTopology.key)
-			return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonConflict)
-		}
-	}
-
-	return nil
-}
-
-func getStateData(cs *framework.CycleState) (*stateData, error) {
-	state, err := cs.Read(preFilterStateKey)
-	if err != nil {
-		return nil, err
-	}
-	s, ok := state.(*stateData)
-	if !ok {
-		return nil, errors.New("unable to convert state into stateData")
-	}
-	return s, nil
-}
-
-func getErrorAsStatus(err error) *framework.Status {
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			return framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error())
-		}
-		return framework.AsStatus(err)
-	}
-	return nil
-}
-
-// EventsToRegister returns the possible events that may make a Pod
-// failed by this plugin schedulable.
-func (pl *VolumeZone) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) {
-	// A new node or updating a node's volume zone labels may make a pod schedulable.
-	// A note about UpdateNodeTaint event:
-	// Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin.
-	// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
-	// See: https://github.com/kubernetes/kubernetes/issues/109437
-	nodeActionType := framework.Add | framework.UpdateNodeLabel | framework.UpdateNodeTaint
-	if pl.enableSchedulingQueueHint {
-		// preCheck is not used when QHint is enabled.
-		nodeActionType = framework.Add | framework.UpdateNodeLabel
-	}
-
-	return []framework.ClusterEventWithHint{
-		// New storageClass with bind mode `VolumeBindingWaitForFirstConsumer` will make a pod schedulable.
-		// Due to immutable field `storageClass.volumeBindingMode`, storageClass update events are ignored.
-		{Event: framework.ClusterEvent{Resource: framework.StorageClass, ActionType: framework.Add}, QueueingHintFn: pl.isSchedulableAfterStorageClassAdded},
-		{Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}},
-		// A new pvc may make a pod schedulable.
-		// Also, if pvc's VolumeName is filled, that also could make a pod schedulable.
-		{Event: framework.ClusterEvent{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterPersistentVolumeClaimChange},
-		// A new pv or updating a pv's volume zone labels may make a pod schedulable.
-		{Event: framework.ClusterEvent{Resource: framework.PersistentVolume, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterPersistentVolumeChange},
-	}, nil
-}
-
-// getPersistentVolumeClaimNameFromPod gets pvc names bound to a pod.
-func (pl *VolumeZone) getPersistentVolumeClaimNameFromPod(pod *v1.Pod) []string {
-	var pvcNames []string
-	for i := range pod.Spec.Volumes {
-		volume := pod.Spec.Volumes[i]
-		if volume.PersistentVolumeClaim == nil {
-			continue
-		}
-		pvcName := volume.PersistentVolumeClaim.ClaimName
-		pvcNames = append(pvcNames, pvcName)
-	}
-	return pvcNames
-}
-
-// isSchedulableAfterPersistentVolumeClaimChange is invoked whenever a PersistentVolumeClaim is added or updated.
-// It checks whether the change of PVC has made a previously unschedulable pod schedulable.
-func (pl *VolumeZone) isSchedulableAfterPersistentVolumeClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
-	_, modifiedPVC, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
-	if err != nil {
-		return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPersistentVolumeClaimChange: %w", err)
-	}
-	if pl.isPVCRequestedFromPod(logger, modifiedPVC, pod) {
-		logger.V(5).Info("PVC that is referred from the pod was created or updated, which might make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(modifiedPVC))
-		return framework.Queue, nil
-	}
-
-	logger.V(5).Info("PVC irrelevant to the Pod was created or updated, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(modifiedPVC))
-	return framework.QueueSkip, nil
-}
-
-// isPVCRequestedFromPod verifies if the PVC is requested from a given Pod.
-func (pl *VolumeZone) isPVCRequestedFromPod(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pod *v1.Pod) bool {
-	if (pvc == nil) || (pod.Namespace != pvc.Namespace) {
-		return false
-	}
-	pvcNames := pl.getPersistentVolumeClaimNameFromPod(pod)
-	for _, pvcName := range pvcNames {
-		if pvc.Name == pvcName {
-			logger.V(5).Info("PVC is referred from the pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
-			return true
-		}
-	}
-	logger.V(5).Info("PVC is not referred from the pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
-	return false
-}
-
-// isSchedulableAfterStorageClassAdded is invoked whenever a StorageClass is added.
-// It checks whether the addition of StorageClass has made a previously unschedulable pod schedulable.
-// Only a new StorageClass with WaitForFirstConsumer will cause a pod to become schedulable.
-func (pl *VolumeZone) isSchedulableAfterStorageClassAdded(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - _, addedStorageClass, err := util.As[*storage.StorageClass](nil, newObj) - if err != nil { - return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterStorageClassAdded: %w", err) - } - if (addedStorageClass.VolumeBindingMode == nil) || (*addedStorageClass.VolumeBindingMode != storage.VolumeBindingWaitForFirstConsumer) { - logger.V(5).Info("StorageClass is created, but its VolumeBindingMode is not waitForFirstConsumer, which doesn't make the pod schedulable", "storageClass", klog.KObj(addedStorageClass), "pod", klog.KObj(pod)) - return framework.QueueSkip, nil - } - - logger.V(5).Info("StorageClass with waitForFirstConsumer mode was created and it might make this pod schedulable", "pod", klog.KObj(pod), "StorageClass", klog.KObj(addedStorageClass)) - return framework.Queue, nil -} - -// isSchedulableAfterPersistentVolumeChange is invoked whenever a PersistentVolume added or updated. -// It checks whether the change of PV has made a previously unschedulable pod schedulable. -// Changing the PV topology labels could cause the pod to become schedulable. -func (pl *VolumeZone) isSchedulableAfterPersistentVolumeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - originalPV, modifiedPV, err := util.As[*v1.PersistentVolume](oldObj, newObj) - if err != nil { - return framework.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPersistentVolumeChange: %w", err) - } - if originalPV == nil { - logger.V(5).Info("PV is newly created, which might make the pod schedulable") - return framework.Queue, nil - } - originalPVTopologies := pl.getPVTopologies(logger, originalPV) - modifiedPVTopologies := pl.getPVTopologies(logger, modifiedPV) - if !reflect.DeepEqual(originalPVTopologies, modifiedPVTopologies) { - logger.V(5).Info("PV's topology was updated, which might make the pod schedulable.", "pod", klog.KObj(pod), "PV", klog.KObj(modifiedPV)) - return framework.Queue, nil - } - - logger.V(5).Info("PV was updated, but the topology is unchanged, which it doesn't make the pod schedulable", "pod", klog.KObj(pod), "PV", klog.KObj(modifiedPV)) - return framework.QueueSkip, nil -} - -// getPVTopologies retrieves pvTopology from a given PV and returns the array -// This function doesn't check spec.nodeAffinity -// because it's read-only after creation and thus cannot be updated -// and nodeAffinity is being handled in node affinity plugin -func (pl *VolumeZone) getPVTopologies(logger klog.Logger, pv *v1.PersistentVolume) []pvTopology { - podPVTopologies := make([]pvTopology, 0) - for _, key := range topologyLabels { - if value, ok := pv.ObjectMeta.Labels[key]; ok { - labelZonesSet, err := volumehelpers.LabelZonesToSet(value) - if err != nil { - logger.V(5).Info("failed to parse PV's topology label, ignoring the label", "label", fmt.Sprintf("%s:%s", key, value), "err", err) - continue - } - podPVTopologies = append(podPVTopologies, pvTopology{ - pvName: pv.Name, - key: key, - values: sets.Set[string](labelZonesSet), - }) - } - } - return podPVTopologies -} - -// New initializes a new plugin and returns it. 
-func New(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() - pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() - scLister := informerFactory.Storage().V1().StorageClasses().Lister() - return &VolumeZone{ - pvLister: pvLister, - pvcLister: pvcLister, - scLister: scLister, - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, - }, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go deleted file mode 100644 index d23c18595cc..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go +++ /dev/null @@ -1,739 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package preemption - -import ( - "context" - "errors" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - v1 "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - corelisters "k8s.io/client-go/listers/core/v1" - policylisters "k8s.io/client-go/listers/policy/v1" - corev1helpers "k8s.io/component-helpers/scheduling/corev1" - "k8s.io/klog/v2" - extenderv1 "k8s.io/kube-scheduler/extender/v1" - apipod "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" - "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// Candidate represents a nominated node on which the preemptor can be scheduled, -// along with the list of victims that should be evicted for the preemptor to fit the node. -type Candidate interface { - // Victims wraps a list of to-be-preempted Pods and the number of PDB violation. - Victims() *extenderv1.Victims - // Name returns the target node name where the preemptor gets nominated to run. - Name() string -} - -type candidate struct { - victims *extenderv1.Victims - name string -} - -// Victims returns s.victims. -func (s *candidate) Victims() *extenderv1.Victims { - return s.victims -} - -// Name returns s.name. -func (s *candidate) Name() string { - return s.name -} - -type candidateList struct { - idx int32 - items []Candidate -} - -func newCandidateList(size int32) *candidateList { - return &candidateList{idx: -1, items: make([]Candidate, size)} -} - -// add adds a new candidate to the internal array atomically. -func (cl *candidateList) add(c *candidate) { - if idx := atomic.AddInt32(&cl.idx, 1); idx < int32(len(cl.items)) { - cl.items[idx] = c - } -} - -// size returns the number of candidate stored. 
Note that some add() operations -// might still be executing when this is called, so care must be taken to -// ensure that all add() operations complete before accessing the elements of -// the list. -func (cl *candidateList) size() int32 { - n := atomic.LoadInt32(&cl.idx) + 1 - if n >= int32(len(cl.items)) { - n = int32(len(cl.items)) - } - return n -} - -// get returns the internal candidate array. This function is NOT atomic and -// assumes that all add() operations have been completed. -func (cl *candidateList) get() []Candidate { - return cl.items[:cl.size()] -} - -// Interface is expected to be implemented by different preemption plugins as all those member -// methods might have different behavior compared with the default preemption. -type Interface interface { - // GetOffsetAndNumCandidates chooses a random offset and calculates the number of candidates that should be - // shortlisted for dry running preemption. - GetOffsetAndNumCandidates(nodes int32) (int32, int32) - // CandidatesToVictimsMap builds a map from the target node to a list of to-be-preempted Pods and the number of PDB violation. - CandidatesToVictimsMap(candidates []Candidate) map[string]*extenderv1.Victims - // PodEligibleToPreemptOthers returns one bool and one string. The bool indicates whether this pod should be considered for - // preempting other pods or not. The string includes the reason if this pod isn't eligible. - PodEligibleToPreemptOthers(ctx context.Context, pod *v1.Pod, nominatedNodeStatus *framework.Status) (bool, string) - // SelectVictimsOnNode finds minimum set of pods on the given node that should be preempted in order to make enough room - // for "pod" to be scheduled. - // Note that both `state` and `nodeInfo` are deep copied. - SelectVictimsOnNode(ctx context.Context, state *framework.CycleState, - pod *v1.Pod, nodeInfo *framework.NodeInfo, pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *framework.Status) - // OrderedScoreFuncs returns a list of ordered score functions to select preferable node where victims will be preempted. - // The ordered score functions will be processed one by one iff we find more than one node with the highest score. - // Default score functions will be processed if nil returned here for backwards-compatibility. - OrderedScoreFuncs(ctx context.Context, nodesToVictims map[string]*extenderv1.Victims) []func(node string) int64 -} - -type Evaluator struct { - PluginName string - Handler framework.Handle - PodLister corelisters.PodLister - PdbLister policylisters.PodDisruptionBudgetLister - - enableAsyncPreemption bool - mu sync.RWMutex - // preempting is a set that records the pods that are currently triggering preemption asynchronously, - // which is used to prevent the pods from entering the scheduling cycle meanwhile. - preempting sets.Set[types.UID] - - // PreemptPod is a function that actually makes API calls to preempt a specific Pod. - // This is exposed to be replaced during tests. 
-	PreemptPod func(ctx context.Context, c Candidate, preemptor, victim *v1.Pod, pluginName string) error
-
-	Interface
-}
-
-func NewEvaluator(pluginName string, fh framework.Handle, i Interface, enableAsyncPreemption bool) *Evaluator {
-	podLister := fh.SharedInformerFactory().Core().V1().Pods().Lister()
-	pdbLister := fh.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister()
-
-	ev := &Evaluator{
-		PluginName:            pluginName,
-		Handler:               fh,
-		PodLister:             podLister,
-		PdbLister:             pdbLister,
-		Interface:             i,
-		enableAsyncPreemption: enableAsyncPreemption,
-		preempting:            sets.New[types.UID](),
-	}
-
-	// PreemptPod actually makes API calls to preempt a specific Pod.
-	//
-	// We implement it here directly, rather than creating a separate method like ev.preemptPod(...)
-	// to prevent the misuse of the PreemptPod function.
-	ev.PreemptPod = func(ctx context.Context, c Candidate, preemptor, victim *v1.Pod, pluginName string) error {
-		logger := klog.FromContext(ctx)
-
-		// If the victim is a WaitingPod, send a reject message to the PermitPlugin.
-		// Otherwise we should delete the victim.
-		if waitingPod := ev.Handler.GetWaitingPod(victim.UID); waitingPod != nil {
-			waitingPod.Reject(pluginName, "preempted")
-			logger.V(2).Info("Preemptor pod rejected a waiting pod", "preemptor", klog.KObj(preemptor), "waitingPod", klog.KObj(victim), "node", c.Name())
-		} else {
-			condition := &v1.PodCondition{
-				Type:               v1.DisruptionTarget,
-				ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(&victim.Status, victim.Generation, v1.DisruptionTarget),
-				Status:             v1.ConditionTrue,
-				Reason:             v1.PodReasonPreemptionByScheduler,
-				Message:            fmt.Sprintf("%s: preempting to accommodate a higher priority pod", preemptor.Spec.SchedulerName),
-			}
-			newStatus := victim.Status.DeepCopy()
-			updated := apipod.UpdatePodCondition(newStatus, condition)
-			if updated {
-				if err := util.PatchPodStatus(ctx, ev.Handler.ClientSet(), victim, newStatus); err != nil {
-					logger.Error(err, "Could not add DisruptionTarget condition due to preemption", "pod", klog.KObj(victim), "preemptor", klog.KObj(preemptor))
-					return err
-				}
-			}
-			if err := util.DeletePod(ctx, ev.Handler.ClientSet(), victim); err != nil {
-				if !apierrors.IsNotFound(err) {
-					logger.Error(err, "Tried to preempted pod", "pod", klog.KObj(victim), "preemptor", klog.KObj(preemptor))
-					return err
-				}
-				logger.V(2).Info("Victim Pod is already deleted", "preemptor", klog.KObj(preemptor), "victim", klog.KObj(victim), "node", c.Name())
-				return nil
-			}
-			logger.V(2).Info("Preemptor Pod preempted victim Pod", "preemptor", klog.KObj(preemptor), "victim", klog.KObj(victim), "node", c.Name())
-		}
-
-		ev.Handler.EventRecorder().Eventf(victim, preemptor, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by pod %v on node %v", preemptor.UID, c.Name())
-
-		return nil
-	}
-
-	return ev
-}
-
-// IsPodRunningPreemption returns true if the pod is currently triggering preemption asynchronously.
-func (ev *Evaluator) IsPodRunningPreemption(podUID types.UID) bool {
-	ev.mu.RLock()
-	defer ev.mu.RUnlock()
-
-	return ev.preempting.Has(podUID)
-}
-
-// Preempt returns a PostFilterResult carrying suggested nominatedNodeName, along with a Status.
-// The semantics of returned <PostFilterResult, Status> varies on different scenarios:
-//
-//   - <nil, Error>. This denotes it's a transient/rare error that may be self-healed in future cycles.
-//
-//   - <nil, Unschedulable>. This status is mostly as expected like the preemptor is waiting for the
-//     victims to be fully terminated.
-//
-//   - In both cases above, a nil PostFilterResult is returned to keep the pod's nominatedNodeName unchanged.
-//
-//   - <non-nil PostFilterResult, Unschedulable>. It indicates the pod cannot be scheduled even with preemption.
-//     In this case, a non-nil PostFilterResult is returned and result.NominatingMode instructs how to deal with
-//     the nominatedNodeName.
-//
-//   - <non-nil PostFilterResult, Success>. It's the regular happy path
-//     and the non-empty nominatedNodeName will be applied to the preemptor pod.
-func (ev *Evaluator) Preempt(ctx context.Context, state *framework.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
-	logger := klog.FromContext(ctx)
-
-	// 0) Fetch the latest version of <pod>.
-	// It's safe to directly fetch pod here. Because the informer cache has already been
-	// initialized when creating the Scheduler obj.
-	// However, tests may need to manually initialize the shared pod informer.
-	podNamespace, podName := pod.Namespace, pod.Name
-	pod, err := ev.PodLister.Pods(pod.Namespace).Get(pod.Name)
-	if err != nil {
-		logger.Error(err, "Could not get the updated preemptor pod object", "pod", klog.KRef(podNamespace, podName))
-		return nil, framework.AsStatus(err)
-	}
-
-	// 1) Ensure the preemptor is eligible to preempt other pods.
-	nominatedNodeStatus := m.Get(pod.Status.NominatedNodeName)
-	if ok, msg := ev.PodEligibleToPreemptOthers(ctx, pod, nominatedNodeStatus); !ok {
-		logger.V(5).Info("Pod is not eligible for preemption", "pod", klog.KObj(pod), "reason", msg)
-		return nil, framework.NewStatus(framework.Unschedulable, msg)
-	}
-
-	// 2) Find all preemption candidates.
-	allNodes, err := ev.Handler.SnapshotSharedLister().NodeInfos().List()
-	if err != nil {
-		return nil, framework.AsStatus(err)
-	}
-	candidates, nodeToStatusMap, err := ev.findCandidates(ctx, state, allNodes, pod, m)
-	if err != nil && len(candidates) == 0 {
-		return nil, framework.AsStatus(err)
-	}
-
-	// Return a FitError only when there are no candidates that fit the pod.
-	if len(candidates) == 0 {
-		logger.V(2).Info("No preemption candidate is found; preemption is not helpful for scheduling", "pod", klog.KObj(pod))
-		fitError := &framework.FitError{
-			Pod:         pod,
-			NumAllNodes: len(allNodes),
-			Diagnosis: framework.Diagnosis{
-				NodeToStatus: nodeToStatusMap,
-				// Leave UnschedulablePlugins or PendingPlugins as nil as it won't be used on moving Pods.
-			},
-		}
-		fitError.Diagnosis.NodeToStatus.SetAbsentNodesStatus(framework.NewStatus(framework.UnschedulableAndUnresolvable, "Preemption is not helpful for scheduling"))
-		// Specify nominatedNodeName to clear the pod's nominatedNodeName status, if applicable.
-		return framework.NewPostFilterResultWithNominatedNode(""), framework.NewStatus(framework.Unschedulable, fitError.Error())
-	}
-
-	// 3) Interact with registered Extenders to filter out some candidates if needed.
-	candidates, status := ev.callExtenders(logger, pod, candidates)
-	if !status.IsSuccess() {
-		return nil, status
-	}
-
-	// 4) Find the best candidate.
-	bestCandidate := ev.SelectCandidate(ctx, candidates)
-	if bestCandidate == nil || len(bestCandidate.Name()) == 0 {
-		return nil, framework.NewStatus(framework.Unschedulable, "no candidate node for preemption")
-	}
-
-	logger.V(2).Info("the target node for the preemption is determined", "node", bestCandidate.Name(), "pod", klog.KObj(pod))
-
-	// 5) Perform preparation work before nominating the selected candidate.
-	if ev.enableAsyncPreemption {
-		ev.prepareCandidateAsync(bestCandidate, pod, ev.PluginName)
-	} else {
-		if status := ev.prepareCandidate(ctx, bestCandidate, pod, ev.PluginName); !status.IsSuccess() {
-			return nil, status
-		}
-	}
-
-	return framework.NewPostFilterResultWithNominatedNode(bestCandidate.Name()), framework.NewStatus(framework.Success)
-}
-
-// FindCandidates calculates a slice of preemption candidates.
-// Each candidate is executable to make the given <pod> schedulable.
-func (ev *Evaluator) findCandidates(ctx context.Context, state *framework.CycleState, allNodes []*framework.NodeInfo, pod *v1.Pod, m framework.NodeToStatusReader) ([]Candidate, *framework.NodeToStatus, error) {
-	if len(allNodes) == 0 {
-		return nil, nil, errors.New("no nodes available")
-	}
-	logger := klog.FromContext(ctx)
-	// Get a list of nodes with failed predicates (Unschedulable) that may be satisfied by removing pods from the node.
-	potentialNodes, err := m.NodesForStatusCode(ev.Handler.SnapshotSharedLister().NodeInfos(), framework.Unschedulable)
-	if err != nil {
-		return nil, nil, err
-	}
-	if len(potentialNodes) == 0 {
-		logger.V(3).Info("Preemption will not help schedule pod on any node", "pod", klog.KObj(pod))
-		// In this case, we should clean-up any existing nominated node name of the pod.
-		if err := util.ClearNominatedNodeName(ctx, ev.Handler.ClientSet(), pod); err != nil {
-			logger.Error(err, "Could not clear the nominatedNodeName field of pod", "pod", klog.KObj(pod))
-			// We do not return as this error is not critical.
-		}
-		return nil, framework.NewDefaultNodeToStatus(), nil
-	}
-
-	pdbs, err := getPodDisruptionBudgets(ev.PdbLister)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	offset, candidatesNum := ev.GetOffsetAndNumCandidates(int32(len(potentialNodes)))
-	return ev.DryRunPreemption(ctx, state, pod, potentialNodes, pdbs, offset, candidatesNum)
-}
-
-// callExtenders calls given <extenders> to select the list of feasible candidates.
-// We will only check with extenders that support preemption.
-// Extenders which do not support preemption may later prevent preemptor from being scheduled on the nominated
-// node. In that case, scheduler will find a different host for the preemptor in subsequent scheduling cycles.
-func (ev *Evaluator) callExtenders(logger klog.Logger, pod *v1.Pod, candidates []Candidate) ([]Candidate, *framework.Status) {
-	extenders := ev.Handler.Extenders()
-	nodeLister := ev.Handler.SnapshotSharedLister().NodeInfos()
-	if len(extenders) == 0 {
-		return candidates, nil
-	}
-
-	// Migrate candidate slice to victimsMap to adapt to the Extender interface.
-	// It's only applicable for candidate slice that have unique nominated node name.
-	victimsMap := ev.CandidatesToVictimsMap(candidates)
-	if len(victimsMap) == 0 {
-		return candidates, nil
-	}
-	for _, extender := range extenders {
-		if !extender.SupportsPreemption() || !extender.IsInterested(pod) {
-			continue
-		}
-		nodeNameToVictims, err := extender.ProcessPreemption(pod, victimsMap, nodeLister)
-		if err != nil {
-			if extender.IsIgnorable() {
-				logger.Info("Skipped extender as it returned error and has ignorable flag set",
-					"extender", extender.Name(), "err", err)
-				continue
-			}
-			return nil, framework.AsStatus(err)
-		}
-		// Check if the returned victims are valid.
-		for nodeName, victims := range nodeNameToVictims {
-			if victims == nil || len(victims.Pods) == 0 {
-				if extender.IsIgnorable() {
-					delete(nodeNameToVictims, nodeName)
-					logger.Info("Ignored node for which the extender didn't report victims", "node", klog.KRef("", nodeName), "extender", extender.Name())
-					continue
-				}
-				return nil, framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeName))
-			}
-		}
-
-		// Replace victimsMap with new result after preemption. So the
-		// rest of extenders can continue use it as parameter.
-		victimsMap = nodeNameToVictims
-
-		// If node list becomes empty, no preemption can happen regardless of other extenders.
-		if len(victimsMap) == 0 {
-			break
-		}
-	}
-
-	var newCandidates []Candidate
-	for nodeName := range victimsMap {
-		newCandidates = append(newCandidates, &candidate{
-			victims: victimsMap[nodeName],
-			name:    nodeName,
-		})
-	}
-	return newCandidates, nil
-}
-
-// SelectCandidate chooses the best-fit candidate from given <candidates> and return it.
-// NOTE: This method is exported for easier testing in default preemption.
-func (ev *Evaluator) SelectCandidate(ctx context.Context, candidates []Candidate) Candidate {
-	logger := klog.FromContext(ctx)
-
-	if len(candidates) == 0 {
-		return nil
-	}
-	if len(candidates) == 1 {
-		return candidates[0]
-	}
-
-	victimsMap := ev.CandidatesToVictimsMap(candidates)
-	scoreFuncs := ev.OrderedScoreFuncs(ctx, victimsMap)
-	candidateNode := pickOneNodeForPreemption(logger, victimsMap, scoreFuncs)
-
-	// Same as candidatesToVictimsMap, this logic is not applicable for out-of-tree
-	// preemption plugins that exercise different candidates on the same nominated node.
-	if victims := victimsMap[candidateNode]; victims != nil {
-		return &candidate{
-			victims: victims,
-			name:    candidateNode,
-		}
-	}
-
-	// We shouldn't reach here.
-	logger.Error(errors.New("no candidate selected"), "Should not reach here", "candidates", candidates)
-	// To not break the whole flow, return the first candidate.
-	return candidates[0]
-}
-
-// prepareCandidate does some preparation work before nominating the selected candidate:
-// - Evict the victim pods
-// - Reject the victim pods if they are in waitingPod map
-// - Clear the low-priority pods' nominatedNodeName status if needed
-func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1.Pod, pluginName string) *framework.Status {
-	fh := ev.Handler
-	cs := ev.Handler.ClientSet()
-
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-	logger := klog.FromContext(ctx)
-	errCh := parallelize.NewErrorChannel()
-	fh.Parallelizer().Until(ctx, len(c.Victims().Pods), func(index int) {
-		if err := ev.PreemptPod(ctx, c, pod, c.Victims().Pods[index], pluginName); err != nil {
-			errCh.SendErrorWithCancel(err, cancel)
-		}
-	}, ev.PluginName)
-	if err := errCh.ReceiveError(); err != nil {
-		return framework.AsStatus(err)
-	}
-
-	metrics.PreemptionVictims.Observe(float64(len(c.Victims().Pods)))
-
-	// Lower priority pods nominated to run on this node, may no longer fit on
-	// this node. So, we should remove their nomination. Removing their
-	// nomination updates these pods and moves them to the active queue. It
-	// lets scheduler find another place for them.
-	nominatedPods := getLowerPriorityNominatedPods(logger, fh, pod, c.Name())
-	if err := util.ClearNominatedNodeName(ctx, cs, nominatedPods...); err != nil {
-		logger.Error(err, "Cannot clear 'NominatedNodeName' field")
-		// We do not return as this error is not critical.
- } - - return nil -} - -// prepareCandidateAsync triggers a goroutine for some preparation work: -// - Evict the victim pods -// - Reject the victim pods if they are in waitingPod map -// - Clear the low-priority pods' nominatedNodeName status if needed -// The Pod won't be retried until the goroutine triggered here completes. -// -// See http://kep.k8s.io/4832 for how the async preemption works. -func (ev *Evaluator) prepareCandidateAsync(c Candidate, pod *v1.Pod, pluginName string) { - metrics.PreemptionVictims.Observe(float64(len(c.Victims().Pods))) - - // Intentionally create a new context, not using a ctx from the scheduling cycle, to create ctx, - // because this process could continue even after this scheduling cycle finishes. - ctx, cancel := context.WithCancel(context.Background()) - errCh := parallelize.NewErrorChannel() - preemptPod := func(index int) { - victim := c.Victims().Pods[index] - if err := ev.PreemptPod(ctx, c, pod, victim, pluginName); err != nil { - errCh.SendErrorWithCancel(err, cancel) - } - } - - ev.mu.Lock() - ev.preempting.Insert(pod.UID) - ev.mu.Unlock() - - logger := klog.FromContext(ctx) - go func() { - startTime := time.Now() - result := metrics.GoroutineResultSuccess - defer metrics.PreemptionGoroutinesDuration.WithLabelValues(result).Observe(metrics.SinceInSeconds(startTime)) - defer metrics.PreemptionGoroutinesExecutionTotal.WithLabelValues(result).Inc() - defer func() { - if result == metrics.GoroutineResultError { - // When API call isn't successful, the Pod may get stuck in the unschedulable pod pool in the worst case. - // So, we should move the Pod to the activeQ. - ev.Handler.Activate(logger, map[string]*v1.Pod{pod.Name: pod}) - } - }() - defer cancel() - logger.V(2).Info("Start the preemption asynchronously", "preemptor", klog.KObj(pod), "node", c.Name(), "numVictims", len(c.Victims().Pods)) - - // Lower priority pods nominated to run on this node, may no longer fit on - // this node. So, we should remove their nomination. Removing their - // nomination updates these pods and moves them to the active queue. It - // lets scheduler find another place for them. - nominatedPods := getLowerPriorityNominatedPods(logger, ev.Handler, pod, c.Name()) - if err := util.ClearNominatedNodeName(ctx, ev.Handler.ClientSet(), nominatedPods...); err != nil { - logger.Error(err, "Cannot clear 'NominatedNodeName' field from lower priority pods on the same target node", "node", c.Name()) - result = metrics.GoroutineResultError - // We do not return as this error is not critical. - } - - if len(c.Victims().Pods) == 0 { - ev.mu.Lock() - delete(ev.preempting, pod.UID) - ev.mu.Unlock() - - return - } - - // We can evict all victims in parallel, but the last one. - // We have to remove the pod from the preempting map before the last one is evicted - // because, otherwise, the pod removal might be notified to the scheduling queue before - // we remove this pod from the preempting map, - // and the pod could end up stucking at the unschedulable pod pool - // by all the pod removal events being ignored. 
- ev.Handler.Parallelizer().Until(ctx, len(c.Victims().Pods)-1, preemptPod, ev.PluginName) - if err := errCh.ReceiveError(); err != nil { - logger.Error(err, "Error occurred during async preemption") - result = metrics.GoroutineResultError - } - - ev.mu.Lock() - delete(ev.preempting, pod.UID) - ev.mu.Unlock() - - if err := ev.PreemptPod(ctx, c, pod, c.Victims().Pods[len(c.Victims().Pods)-1], pluginName); err != nil { - logger.Error(err, "Error occurred during async preemption") - result = metrics.GoroutineResultError - } - - logger.V(2).Info("Async Preemption finished completely", "preemptor", klog.KObj(pod), "node", c.Name(), "result", result) - }() -} - -func getPodDisruptionBudgets(pdbLister policylisters.PodDisruptionBudgetLister) ([]*policy.PodDisruptionBudget, error) { - if pdbLister != nil { - return pdbLister.List(labels.Everything()) - } - return nil, nil -} - -// pickOneNodeForPreemption chooses one node among the given nodes. -// It assumes pods in each map entry are ordered by decreasing priority. -// If the scoreFuns is not empty, It picks a node based on score scoreFuns returns. -// If the scoreFuns is empty, -// It picks a node based on the following criteria: -// 1. A node with minimum number of PDB violations. -// 2. A node with minimum highest priority victim is picked. -// 3. Ties are broken by sum of priorities of all victims. -// 4. If there are still ties, node with the minimum number of victims is picked. -// 5. If there are still ties, node with the latest start time of all highest priority victims is picked. -// 6. If there are still ties, the first such node is picked (sort of randomly). -// The 'minNodes1' and 'minNodes2' are being reused here to save the memory -// allocation and garbage collection time. -func pickOneNodeForPreemption(logger klog.Logger, nodesToVictims map[string]*extenderv1.Victims, scoreFuncs []func(node string) int64) string { - if len(nodesToVictims) == 0 { - return "" - } - - allCandidates := make([]string, 0, len(nodesToVictims)) - for node := range nodesToVictims { - allCandidates = append(allCandidates, node) - } - - if len(scoreFuncs) == 0 { - minNumPDBViolatingScoreFunc := func(node string) int64 { - // The smaller the NumPDBViolations, the higher the score. - return -nodesToVictims[node].NumPDBViolations - } - minHighestPriorityScoreFunc := func(node string) int64 { - // highestPodPriority is the highest priority among the victims on this node. - highestPodPriority := corev1helpers.PodPriority(nodesToVictims[node].Pods[0]) - // The smaller the highestPodPriority, the higher the score. - return -int64(highestPodPriority) - } - minSumPrioritiesScoreFunc := func(node string) int64 { - var sumPriorities int64 - for _, pod := range nodesToVictims[node].Pods { - // We add MaxInt32+1 to all priorities to make all of them >= 0. This is - // needed so that a node with a few pods with negative priority is not - // picked over a node with a smaller number of pods with the same negative - // priority (and similar scenarios). - sumPriorities += int64(corev1helpers.PodPriority(pod)) + int64(math.MaxInt32+1) - } - // The smaller the sumPriorities, the higher the score. - return -sumPriorities - } - minNumPodsScoreFunc := func(node string) int64 { - // The smaller the length of pods, the higher the score. - return -int64(len(nodesToVictims[node].Pods)) - } - latestStartTimeScoreFunc := func(node string) int64 { - // Get the earliest start time of all pods on the current node. 
-			earliestStartTimeOnNode := util.GetEarliestPodStartTime(nodesToVictims[node])
-			if earliestStartTimeOnNode == nil {
-				logger.Error(errors.New("earliestStartTime is nil for node"), "Should not reach here", "node", node)
-				return int64(math.MinInt64)
-			}
-			// The bigger the earliestStartTimeOnNode, the higher the score.
-			return earliestStartTimeOnNode.UnixNano()
-		}
-
-		// Each scoreFunc scores the nodes according to specific rules and keeps the name of the node
-		// with the highest score. If and only if the scoreFunc has more than one node with the highest
-		// score, we will execute the other scoreFunc in order of precedence.
-		scoreFuncs = []func(string) int64{
-			// A node with a minimum number of PDB is preferable.
-			minNumPDBViolatingScoreFunc,
-			// A node with a minimum highest priority victim is preferable.
-			minHighestPriorityScoreFunc,
-			// A node with the smallest sum of priorities is preferable.
-			minSumPrioritiesScoreFunc,
-			// A node with the minimum number of pods is preferable.
-			minNumPodsScoreFunc,
-			// A node with the latest start time of all highest priority victims is preferable.
-			latestStartTimeScoreFunc,
-			// If there are still ties, then the first Node in the list is selected.
-		}
-	}
-
-	for _, f := range scoreFuncs {
-		selectedNodes := []string{}
-		maxScore := int64(math.MinInt64)
-		for _, node := range allCandidates {
-			score := f(node)
-			if score > maxScore {
-				maxScore = score
-				selectedNodes = []string{}
-			}
-			if score == maxScore {
-				selectedNodes = append(selectedNodes, node)
-			}
-		}
-		if len(selectedNodes) == 1 {
-			return selectedNodes[0]
-		}
-		allCandidates = selectedNodes
-	}
-
-	return allCandidates[0]
-}
-
-// getLowerPriorityNominatedPods returns pods whose priority is smaller than the
-// priority of the given "pod" and are nominated to run on the given node.
-// Note: We could possibly check if the nominated lower priority pods still fit
-// and return those that no longer fit, but that would require lots of
-// manipulation of NodeInfo and PreFilter state per nominated pod. It may not be
-// worth the complexity, especially because we generally expect to have a very
-// small number of nominated pods per node.
-func getLowerPriorityNominatedPods(logger klog.Logger, pn framework.PodNominator, pod *v1.Pod, nodeName string) []*v1.Pod {
-	podInfos := pn.NominatedPodsForNode(nodeName)
-
-	if len(podInfos) == 0 {
-		return nil
-	}
-
-	var lowerPriorityPods []*v1.Pod
-	podPriority := corev1helpers.PodPriority(pod)
-	for _, pi := range podInfos {
-		if corev1helpers.PodPriority(pi.Pod) < podPriority {
-			lowerPriorityPods = append(lowerPriorityPods, pi.Pod)
-		}
-	}
-	return lowerPriorityPods
-}
-
-// DryRunPreemption simulates Preemption logic on <potentialNodes> in parallel,
-// returns preemption candidates and a map indicating filtered nodes statuses.
-// The number of candidates depends on the constraints defined in the plugin's args. In the returned list of
-// candidates, ones that do not violate PDB are preferred over ones that do.
-// NOTE: This method is exported for easier testing in default preemption.
-func (ev *Evaluator) DryRunPreemption(ctx context.Context, state *framework.CycleState, pod *v1.Pod, potentialNodes []*framework.NodeInfo, - pdbs []*policy.PodDisruptionBudget, offset int32, candidatesNum int32) ([]Candidate, *framework.NodeToStatus, error) { - - fh := ev.Handler - nonViolatingCandidates := newCandidateList(candidatesNum) - violatingCandidates := newCandidateList(candidatesNum) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - nodeStatuses := framework.NewDefaultNodeToStatus() - - logger := klog.FromContext(ctx) - logger.V(5).Info("Dry run the preemption", "potentialNodesNumber", len(potentialNodes), "pdbsNumber", len(pdbs), "offset", offset, "candidatesNumber", candidatesNum) - - var statusesLock sync.Mutex - var errs []error - checkNode := func(i int) { - nodeInfoCopy := potentialNodes[(int(offset)+i)%len(potentialNodes)].Snapshot() - logger.V(5).Info("Check the potential node for preemption", "node", nodeInfoCopy.Node().Name) - - stateCopy := state.Clone() - pods, numPDBViolations, status := ev.SelectVictimsOnNode(ctx, stateCopy, pod, nodeInfoCopy, pdbs) - if status.IsSuccess() && len(pods) != 0 { - victims := extenderv1.Victims{ - Pods: pods, - NumPDBViolations: int64(numPDBViolations), - } - c := &candidate{ - victims: &victims, - name: nodeInfoCopy.Node().Name, - } - if numPDBViolations == 0 { - nonViolatingCandidates.add(c) - } else { - violatingCandidates.add(c) - } - nvcSize, vcSize := nonViolatingCandidates.size(), violatingCandidates.size() - if nvcSize > 0 && nvcSize+vcSize >= candidatesNum { - cancel() - } - return - } - if status.IsSuccess() && len(pods) == 0 { - status = framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeInfoCopy.Node().Name)) - } - statusesLock.Lock() - if status.Code() == framework.Error { - errs = append(errs, status.AsError()) - } - nodeStatuses.Set(nodeInfoCopy.Node().Name, status) - statusesLock.Unlock() - } - fh.Parallelizer().Until(ctx, len(potentialNodes), checkNode, ev.PluginName) - return append(nonViolatingCandidates.get(), violatingCandidates.get()...), nodeStatuses, utilerrors.NewAggregate(errs) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go deleted file mode 100644 index 77dc23bc52d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go +++ /dev/null @@ -1,1672 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package runtime - -import ( - "context" - "errors" - "fmt" - "io" - "reflect" - "sort" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/events" - "k8s.io/component-helpers/scheduling/corev1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" - "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/kubernetes/pkg/util/slice" -) - -const ( - // Specifies the maximum timeout a permit plugin can return. - maxTimeout = 15 * time.Minute -) - -// frameworkImpl is the component responsible for initializing and running scheduler -// plugins. -type frameworkImpl struct { - registry Registry - snapshotSharedLister framework.SharedLister - waitingPods *waitingPodsMap - scorePluginWeight map[string]int - preEnqueuePlugins []framework.PreEnqueuePlugin - enqueueExtensions []framework.EnqueueExtensions - queueSortPlugins []framework.QueueSortPlugin - preFilterPlugins []framework.PreFilterPlugin - filterPlugins []framework.FilterPlugin - postFilterPlugins []framework.PostFilterPlugin - preScorePlugins []framework.PreScorePlugin - scorePlugins []framework.ScorePlugin - reservePlugins []framework.ReservePlugin - preBindPlugins []framework.PreBindPlugin - bindPlugins []framework.BindPlugin - postBindPlugins []framework.PostBindPlugin - permitPlugins []framework.PermitPlugin - - // pluginsMap contains all plugins, by name. - pluginsMap map[string]framework.Plugin - - clientSet clientset.Interface - kubeConfig *restclient.Config - eventRecorder events.EventRecorder - informerFactory informers.SharedInformerFactory - sharedDRAManager framework.SharedDRAManager - logger klog.Logger - - metricsRecorder *metrics.MetricAsyncRecorder - profileName string - percentageOfNodesToScore *int32 - - extenders []framework.Extender - framework.PodNominator - framework.PodActivator - - parallelizer parallelize.Parallelizer -} - -// extensionPoint encapsulates desired and applied set of plugins at a specific extension -// point. This is used to simplify iterating over all extension points supported by the -// frameworkImpl. -type extensionPoint struct { - // the set of plugins to be configured at this extension point. - plugins *config.PluginSet - // a pointer to the slice storing plugins implementations that will run at this - // extension point. - slicePtr interface{} -} - -func (f *frameworkImpl) getExtensionPoints(plugins *config.Plugins) []extensionPoint { - return []extensionPoint{ - {&plugins.PreFilter, &f.preFilterPlugins}, - {&plugins.Filter, &f.filterPlugins}, - {&plugins.PostFilter, &f.postFilterPlugins}, - {&plugins.Reserve, &f.reservePlugins}, - {&plugins.PreScore, &f.preScorePlugins}, - {&plugins.Score, &f.scorePlugins}, - {&plugins.PreBind, &f.preBindPlugins}, - {&plugins.Bind, &f.bindPlugins}, - {&plugins.PostBind, &f.postBindPlugins}, - {&plugins.Permit, &f.permitPlugins}, - {&plugins.PreEnqueue, &f.preEnqueuePlugins}, - {&plugins.QueueSort, &f.queueSortPlugins}, - } -} - -// Extenders returns the registered extenders. 
-func (f *frameworkImpl) Extenders() []framework.Extender { - return f.extenders -} - -type frameworkOptions struct { - componentConfigVersion string - clientSet clientset.Interface - kubeConfig *restclient.Config - eventRecorder events.EventRecorder - informerFactory informers.SharedInformerFactory - sharedDRAManager framework.SharedDRAManager - snapshotSharedLister framework.SharedLister - metricsRecorder *metrics.MetricAsyncRecorder - podNominator framework.PodNominator - podActivator framework.PodActivator - extenders []framework.Extender - captureProfile CaptureProfile - parallelizer parallelize.Parallelizer - waitingPods *waitingPodsMap - logger *klog.Logger -} - -// Option for the frameworkImpl. -type Option func(*frameworkOptions) - -// WithComponentConfigVersion sets the component config version to the -// KubeSchedulerConfiguration version used. The string should be the full -// scheme group/version of the external type we converted from (for example -// "kubescheduler.config.k8s.io/v1") -func WithComponentConfigVersion(componentConfigVersion string) Option { - return func(o *frameworkOptions) { - o.componentConfigVersion = componentConfigVersion - } -} - -// WithClientSet sets clientSet for the scheduling frameworkImpl. -func WithClientSet(clientSet clientset.Interface) Option { - return func(o *frameworkOptions) { - o.clientSet = clientSet - } -} - -// WithKubeConfig sets kubeConfig for the scheduling frameworkImpl. -func WithKubeConfig(kubeConfig *restclient.Config) Option { - return func(o *frameworkOptions) { - o.kubeConfig = kubeConfig - } -} - -// WithEventRecorder sets clientSet for the scheduling frameworkImpl. -func WithEventRecorder(recorder events.EventRecorder) Option { - return func(o *frameworkOptions) { - o.eventRecorder = recorder - } -} - -// WithInformerFactory sets informer factory for the scheduling frameworkImpl. -func WithInformerFactory(informerFactory informers.SharedInformerFactory) Option { - return func(o *frameworkOptions) { - o.informerFactory = informerFactory - } -} - -// WithSharedDRAManager sets SharedDRAManager for the framework. -func WithSharedDRAManager(sharedDRAManager framework.SharedDRAManager) Option { - return func(o *frameworkOptions) { - o.sharedDRAManager = sharedDRAManager - } -} - -// WithSnapshotSharedLister sets the SharedLister of the snapshot. -func WithSnapshotSharedLister(snapshotSharedLister framework.SharedLister) Option { - return func(o *frameworkOptions) { - o.snapshotSharedLister = snapshotSharedLister - } -} - -// WithPodNominator sets podNominator for the scheduling frameworkImpl. -func WithPodNominator(nominator framework.PodNominator) Option { - return func(o *frameworkOptions) { - o.podNominator = nominator - } -} - -func WithPodActivator(activator framework.PodActivator) Option { - return func(o *frameworkOptions) { - o.podActivator = activator - } -} - -// WithExtenders sets extenders for the scheduling frameworkImpl. -func WithExtenders(extenders []framework.Extender) Option { - return func(o *frameworkOptions) { - o.extenders = extenders - } -} - -// WithParallelism sets parallelism for the scheduling frameworkImpl. -func WithParallelism(parallelism int) Option { - return func(o *frameworkOptions) { - o.parallelizer = parallelize.NewParallelizer(parallelism) - } -} - -// CaptureProfile is a callback to capture a finalized profile. -type CaptureProfile func(config.KubeSchedulerProfile) - -// WithCaptureProfile sets a callback to capture the finalized profile. 
-func WithCaptureProfile(c CaptureProfile) Option { - return func(o *frameworkOptions) { - o.captureProfile = c - } -} - -// WithMetricsRecorder sets metrics recorder for the scheduling frameworkImpl. -func WithMetricsRecorder(r *metrics.MetricAsyncRecorder) Option { - return func(o *frameworkOptions) { - o.metricsRecorder = r - } -} - -// WithWaitingPods sets waitingPods for the scheduling frameworkImpl. -func WithWaitingPods(wp *waitingPodsMap) Option { - return func(o *frameworkOptions) { - o.waitingPods = wp - } -} - -// WithLogger overrides the default logger from k8s.io/klog. -func WithLogger(logger klog.Logger) Option { - return func(o *frameworkOptions) { - o.logger = &logger - } -} - -// defaultFrameworkOptions are applied when no option corresponding to those fields exist. -func defaultFrameworkOptions(stopCh <-chan struct{}) frameworkOptions { - return frameworkOptions{ - metricsRecorder: metrics.NewMetricsAsyncRecorder(1000, time.Second, stopCh), - parallelizer: parallelize.NewParallelizer(parallelize.DefaultParallelism), - } -} - -var _ framework.Framework = &frameworkImpl{} - -// NewFramework initializes plugins given the configuration and the registry. -func NewFramework(ctx context.Context, r Registry, profile *config.KubeSchedulerProfile, opts ...Option) (framework.Framework, error) { - options := defaultFrameworkOptions(ctx.Done()) - for _, opt := range opts { - opt(&options) - } - - logger := klog.FromContext(ctx) - if options.logger != nil { - logger = *options.logger - } - f := &frameworkImpl{ - registry: r, - snapshotSharedLister: options.snapshotSharedLister, - scorePluginWeight: make(map[string]int), - waitingPods: options.waitingPods, - clientSet: options.clientSet, - kubeConfig: options.kubeConfig, - eventRecorder: options.eventRecorder, - informerFactory: options.informerFactory, - sharedDRAManager: options.sharedDRAManager, - metricsRecorder: options.metricsRecorder, - extenders: options.extenders, - PodNominator: options.podNominator, - PodActivator: options.podActivator, - parallelizer: options.parallelizer, - logger: logger, - } - - if len(f.extenders) > 0 { - // Extender doesn't support any kind of requeueing feature like EnqueueExtensions in the scheduling framework. - // We register a defaultEnqueueExtension to framework.ExtenderName here. - // And, in the scheduling cycle, when Extenders reject some Nodes and the pod ends up being unschedulable, - // we put framework.ExtenderName to pInfo.UnschedulablePlugins. 
- f.enqueueExtensions = []framework.EnqueueExtensions{&defaultEnqueueExtension{pluginName: framework.ExtenderName}} - } - - if profile == nil { - return f, nil - } - - f.profileName = profile.SchedulerName - f.percentageOfNodesToScore = profile.PercentageOfNodesToScore - if profile.Plugins == nil { - return f, nil - } - - // get needed plugins from config - pg := f.pluginsNeeded(profile.Plugins) - - pluginConfig := make(map[string]runtime.Object, len(profile.PluginConfig)) - for i := range profile.PluginConfig { - name := profile.PluginConfig[i].Name - if _, ok := pluginConfig[name]; ok { - return nil, fmt.Errorf("repeated config for plugin %s", name) - } - pluginConfig[name] = profile.PluginConfig[i].Args - } - outputProfile := config.KubeSchedulerProfile{ - SchedulerName: f.profileName, - PercentageOfNodesToScore: f.percentageOfNodesToScore, - Plugins: profile.Plugins, - PluginConfig: make([]config.PluginConfig, 0, len(pg)), - } - - f.pluginsMap = make(map[string]framework.Plugin) - for name, factory := range r { - // initialize only needed plugins. - if !pg.Has(name) { - continue - } - - args := pluginConfig[name] - if args != nil { - outputProfile.PluginConfig = append(outputProfile.PluginConfig, config.PluginConfig{ - Name: name, - Args: args, - }) - } - p, err := factory(ctx, args, f) - if err != nil { - return nil, fmt.Errorf("initializing plugin %q: %w", name, err) - } - f.pluginsMap[name] = p - - f.fillEnqueueExtensions(p) - } - - // initialize plugins per individual extension points - for _, e := range f.getExtensionPoints(profile.Plugins) { - if err := updatePluginList(e.slicePtr, *e.plugins, f.pluginsMap); err != nil { - return nil, err - } - } - - // initialize multiPoint plugins to their expanded extension points - if len(profile.Plugins.MultiPoint.Enabled) > 0 { - if err := f.expandMultiPointPlugins(logger, profile); err != nil { - return nil, err - } - } - - if len(f.queueSortPlugins) != 1 { - return nil, fmt.Errorf("only one queue sort plugin required for profile with scheduler name %q, but got %d", profile.SchedulerName, len(f.queueSortPlugins)) - } - if len(f.bindPlugins) == 0 { - return nil, fmt.Errorf("at least one bind plugin is needed for profile with scheduler name %q", profile.SchedulerName) - } - - if err := getScoreWeights(f, append(profile.Plugins.Score.Enabled, profile.Plugins.MultiPoint.Enabled...)); err != nil { - return nil, err - } - - // Verifying the score weights again since Plugin.Name() could return a different - // value from the one used in the configuration. - for _, scorePlugin := range f.scorePlugins { - if f.scorePluginWeight[scorePlugin.Name()] == 0 { - return nil, fmt.Errorf("score plugin %q is not configured with weight", scorePlugin.Name()) - } - } - - if options.captureProfile != nil { - if len(outputProfile.PluginConfig) != 0 { - sort.Slice(outputProfile.PluginConfig, func(i, j int) bool { - return outputProfile.PluginConfig[i].Name < outputProfile.PluginConfig[j].Name - }) - } else { - outputProfile.PluginConfig = nil - } - options.captureProfile(outputProfile) - } - - // Logs Enabled Plugins at each extension point, taking default plugins, given config, and multipoint into consideration - logger.V(2).Info("the scheduler starts to work with those plugins", "Plugins", *f.ListPlugins()) - f.setInstrumentedPlugins() - return f, nil -} - -// setInstrumentedPlugins initializes instrumented plugins from current plugins that frameworkImpl has. 
-func (f *frameworkImpl) setInstrumentedPlugins() { - // Cache metric streams for prefilter and filter plugins. - for i, pl := range f.preFilterPlugins { - f.preFilterPlugins[i] = &instrumentedPreFilterPlugin{ - PreFilterPlugin: f.preFilterPlugins[i], - metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreFilter, f.profileName), - } - } - for i, pl := range f.filterPlugins { - f.filterPlugins[i] = &instrumentedFilterPlugin{ - FilterPlugin: f.filterPlugins[i], - metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Filter, f.profileName), - } - } - - // Cache metric streams for prescore and score plugins. - for i, pl := range f.preScorePlugins { - f.preScorePlugins[i] = &instrumentedPreScorePlugin{ - PreScorePlugin: f.preScorePlugins[i], - metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreScore, f.profileName), - } - } - for i, pl := range f.scorePlugins { - f.scorePlugins[i] = &instrumentedScorePlugin{ - ScorePlugin: f.scorePlugins[i], - metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Score, f.profileName), - } - } -} - -func (f *frameworkImpl) SetPodNominator(n framework.PodNominator) { - f.PodNominator = n -} - -func (f *frameworkImpl) SetPodActivator(a framework.PodActivator) { - f.PodActivator = a -} - -// Close closes each plugin, when they implement io.Closer interface. -func (f *frameworkImpl) Close() error { - var errs []error - for name, plugin := range f.pluginsMap { - if closer, ok := plugin.(io.Closer); ok { - err := closer.Close() - if err != nil { - errs = append(errs, fmt.Errorf("%s failed to close: %w", name, err)) - // We try to close all plugins even if we got errors from some. - } - } - } - return errors.Join(errs...) -} - -// getScoreWeights makes sure that, between MultiPoint-Score plugin weights and individual Score -// plugin weights there is not an overflow of MaxTotalScore. -func getScoreWeights(f *frameworkImpl, plugins []config.Plugin) error { - var totalPriority int64 - scorePlugins := reflect.ValueOf(&f.scorePlugins).Elem() - pluginType := scorePlugins.Type().Elem() - for _, e := range plugins { - pg := f.pluginsMap[e.Name] - if !reflect.TypeOf(pg).Implements(pluginType) { - continue - } - - // We append MultiPoint plugins to the list of Score plugins. So if this plugin has already been - // encountered, let the individual Score weight take precedence. - if _, ok := f.scorePluginWeight[e.Name]; ok { - continue - } - // a weight of zero is not permitted, plugins can be disabled explicitly - // when configured. 
- f.scorePluginWeight[e.Name] = int(e.Weight) - if f.scorePluginWeight[e.Name] == 0 { - f.scorePluginWeight[e.Name] = 1 - } - - // Checks totalPriority against MaxTotalScore to avoid overflow - if int64(f.scorePluginWeight[e.Name])*framework.MaxNodeScore > framework.MaxTotalScore-totalPriority { - return fmt.Errorf("total score of Score plugins could overflow") - } - totalPriority += int64(f.scorePluginWeight[e.Name]) * framework.MaxNodeScore - } - return nil -} - -type orderedSet struct { - set map[string]int - list []string - deletionCnt int -} - -func newOrderedSet() *orderedSet { - return &orderedSet{set: make(map[string]int)} -} - -func (os *orderedSet) insert(s string) { - if os.has(s) { - return - } - os.set[s] = len(os.list) - os.list = append(os.list, s) -} - -func (os *orderedSet) has(s string) bool { - _, found := os.set[s] - return found -} - -func (os *orderedSet) delete(s string) { - if i, found := os.set[s]; found { - delete(os.set, s) - os.list = append(os.list[:i-os.deletionCnt], os.list[i+1-os.deletionCnt:]...) - os.deletionCnt++ - } -} - -func (f *frameworkImpl) expandMultiPointPlugins(logger klog.Logger, profile *config.KubeSchedulerProfile) error { - // initialize MultiPoint plugins - for _, e := range f.getExtensionPoints(profile.Plugins) { - plugins := reflect.ValueOf(e.slicePtr).Elem() - pluginType := plugins.Type().Elem() - // build enabledSet of plugins already registered via normal extension points - // to check double registration - enabledSet := newOrderedSet() - for _, plugin := range e.plugins.Enabled { - enabledSet.insert(plugin.Name) - } - - disabledSet := sets.New[string]() - for _, disabledPlugin := range e.plugins.Disabled { - disabledSet.Insert(disabledPlugin.Name) - } - if disabledSet.Has("*") { - logger.V(4).Info("Skipped MultiPoint expansion because all plugins are disabled for extension point", "extension", pluginType) - continue - } - - // track plugins enabled via multipoint separately from those enabled by specific extensions, - // so that we can distinguish between double-registration and explicit overrides - multiPointEnabled := newOrderedSet() - overridePlugins := newOrderedSet() - for _, ep := range profile.Plugins.MultiPoint.Enabled { - pg, ok := f.pluginsMap[ep.Name] - if !ok { - return fmt.Errorf("%s %q does not exist", pluginType.Name(), ep.Name) - } - - // if this plugin doesn't implement the type for the current extension we're trying to expand, skip - if !reflect.TypeOf(pg).Implements(pluginType) { - continue - } - - // a plugin that's enabled via MultiPoint can still be disabled for specific extension points - if disabledSet.Has(ep.Name) { - logger.V(4).Info("Skipped disabled plugin for extension point", "plugin", ep.Name, "extension", pluginType) - continue - } - - // if this plugin has already been enabled by the specific extension point, - // the user intent is to override the default plugin or make some other explicit setting. - // Either way, discard the MultiPoint value for this plugin. - // This maintains expected behavior for overriding default plugins (see https://github.com/kubernetes/kubernetes/pull/99582) - if enabledSet.has(ep.Name) { - overridePlugins.insert(ep.Name) - logger.Info("MultiPoint plugin is explicitly re-configured; overriding", "plugin", ep.Name) - continue - } - - // if this plugin is already registered via MultiPoint, then this is - // a double registration and an error in the config. 
- if multiPointEnabled.has(ep.Name) { - return fmt.Errorf("plugin %q already registered as %q", ep.Name, pluginType.Name()) - } - - // we only need to update the multipoint set, since we already have the specific extension set from above - multiPointEnabled.insert(ep.Name) - } - - // Reorder plugins. Here is the expected order: - // - part 1: overridePlugins. Their order stay intact as how they're specified in regular extension point. - // - part 2: multiPointEnabled - i.e., plugin defined in multipoint but not in regular extension point. - // - part 3: other plugins (excluded by part 1 & 2) in regular extension point. - newPlugins := reflect.New(reflect.TypeOf(e.slicePtr).Elem()).Elem() - // part 1 - for _, name := range slice.CopyStrings(enabledSet.list) { - if overridePlugins.has(name) { - newPlugins = reflect.Append(newPlugins, reflect.ValueOf(f.pluginsMap[name])) - enabledSet.delete(name) - } - } - // part 2 - for _, name := range multiPointEnabled.list { - newPlugins = reflect.Append(newPlugins, reflect.ValueOf(f.pluginsMap[name])) - } - // part 3 - for _, name := range enabledSet.list { - newPlugins = reflect.Append(newPlugins, reflect.ValueOf(f.pluginsMap[name])) - } - plugins.Set(newPlugins) - } - return nil -} - -func shouldHaveEnqueueExtensions(p framework.Plugin) bool { - switch p.(type) { - // Only PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins can (should) have EnqueueExtensions. - // See the comment of EnqueueExtensions for more detailed reason here. - case framework.PreEnqueuePlugin, framework.PreFilterPlugin, framework.FilterPlugin, framework.ReservePlugin, framework.PermitPlugin: - return true - } - return false -} - -func (f *frameworkImpl) fillEnqueueExtensions(p framework.Plugin) { - if !shouldHaveEnqueueExtensions(p) { - // Ignore EnqueueExtensions from plugin which isn't PreEnqueue, PreFilter, Filter, Reserve, and Permit. - return - } - - ext, ok := p.(framework.EnqueueExtensions) - if !ok { - // If interface EnqueueExtensions is not implemented, register the default enqueue extensions - // to the plugin because we don't know which events the plugin is interested in. - // This is to ensure backward compatibility. - f.enqueueExtensions = append(f.enqueueExtensions, &defaultEnqueueExtension{pluginName: p.Name()}) - return - } - - f.enqueueExtensions = append(f.enqueueExtensions, ext) -} - -// defaultEnqueueExtension is used when a plugin does not implement EnqueueExtensions interface. -type defaultEnqueueExtension struct { - pluginName string -} - -func (p *defaultEnqueueExtension) Name() string { return p.pluginName } -func (p *defaultEnqueueExtension) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - // need to return all specific cluster events with framework.All action instead of wildcard event - // because the returning values are used to register event handlers. - // If we return the wildcard here, it won't affect the event handlers registered by the plugin - // and some events may not be registered in the event handlers. 
- return framework.UnrollWildCardResource(), nil -} - -func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, pluginsMap map[string]framework.Plugin) error { - plugins := reflect.ValueOf(pluginList).Elem() - pluginType := plugins.Type().Elem() - set := sets.New[string]() - for _, ep := range pluginSet.Enabled { - pg, ok := pluginsMap[ep.Name] - if !ok { - return fmt.Errorf("%s %q does not exist", pluginType.Name(), ep.Name) - } - - if !reflect.TypeOf(pg).Implements(pluginType) { - return fmt.Errorf("plugin %q does not extend %s plugin", ep.Name, pluginType.Name()) - } - - if set.Has(ep.Name) { - return fmt.Errorf("plugin %q already registered as %q", ep.Name, pluginType.Name()) - } - - set.Insert(ep.Name) - - newPlugins := reflect.Append(plugins, reflect.ValueOf(pg)) - plugins.Set(newPlugins) - } - return nil -} - -// PreEnqueuePlugins returns the registered preEnqueue plugins. -func (f *frameworkImpl) PreEnqueuePlugins() []framework.PreEnqueuePlugin { - return f.preEnqueuePlugins -} - -// EnqueueExtensions returns the registered reenqueue plugins. -func (f *frameworkImpl) EnqueueExtensions() []framework.EnqueueExtensions { - return f.enqueueExtensions -} - -// QueueSortFunc returns the function to sort pods in scheduling queue -func (f *frameworkImpl) QueueSortFunc() framework.LessFunc { - if f == nil { - // If frameworkImpl is nil, simply keep their order unchanged. - // NOTE: this is primarily for tests. - return func(_, _ *framework.QueuedPodInfo) bool { return false } - } - - if len(f.queueSortPlugins) == 0 { - panic("No QueueSort plugin is registered in the frameworkImpl.") - } - - // Only one QueueSort plugin can be enabled. - return f.queueSortPlugins[0].Less -} - -// RunPreFilterPlugins runs the set of configured PreFilter plugins. It returns -// *Status and its code is set to non-success if any of the plugins returns -// anything but Success/Skip. -// When it returns Skip status, returned PreFilterResult and other fields in status are just ignored, -// and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle. -// If a non-success status is returned, then the scheduling cycle is aborted. -func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (_ *framework.PreFilterResult, status *framework.Status, _ sets.Set[string]) { - startTime := time.Now() - skipPlugins := sets.New[string]() - defer func() { - state.SkipFilterPlugins = skipPlugins - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - var result *framework.PreFilterResult - pluginsWithNodes := sets.New[string]() - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "PreFilter") - } - var returnStatus *framework.Status - for _, pl := range f.preFilterPlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - r, s := f.runPreFilterPlugin(ctx, pl, state, pod) - if s.IsSkip() { - skipPlugins.Insert(pl.Name()) - continue - } - if !s.IsSuccess() { - s.SetPlugin(pl.Name()) - if s.Code() == framework.UnschedulableAndUnresolvable { - // In this case, the preemption shouldn't happen in this scheduling cycle. - // So, no need to execute all PreFilter. 
- return nil, s, nil - } - if s.Code() == framework.Unschedulable { - // In this case, the preemption should happen later in this scheduling cycle. - // So we need to execute all PreFilter. - // https://github.com/kubernetes/kubernetes/issues/119770 - returnStatus = s - continue - } - return nil, framework.AsStatus(fmt.Errorf("running PreFilter plugin %q: %w", pl.Name(), s.AsError())).WithPlugin(pl.Name()), nil - } - if !r.AllNodes() { - pluginsWithNodes.Insert(pl.Name()) - } - result = result.Merge(r) - if !result.AllNodes() && len(result.NodeNames) == 0 { - msg := fmt.Sprintf("node(s) didn't satisfy plugin(s) %v simultaneously", sets.List(pluginsWithNodes)) - if len(pluginsWithNodes) == 1 { - msg = fmt.Sprintf("node(s) didn't satisfy plugin %v", sets.List(pluginsWithNodes)[0]) - } - - // When PreFilterResult filters out Nodes, the framework considers Nodes that are filtered out as getting "UnschedulableAndUnresolvable". - return result, framework.NewStatus(framework.UnschedulableAndUnresolvable, msg), pluginsWithNodes - } - } - return result, returnStatus, pluginsWithNodes -} - -func (f *frameworkImpl) runPreFilterPlugin(ctx context.Context, pl framework.PreFilterPlugin, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - if !state.ShouldRecordPluginMetrics() { - return pl.PreFilter(ctx, state, pod) - } - startTime := time.Now() - result, status := pl.PreFilter(ctx, state, pod) - f.metricsRecorder.ObservePluginDurationAsync(metrics.PreFilter, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return result, status -} - -// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured -// PreFilter plugins. It returns directly if any of the plugins return any -// status other than Success. 
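// Illustrative sketch (not part of the vendored file): the result.Merge call
// above narrows the candidate node set like a set intersection, where a nil
// set stands for "all nodes" and an empty intersection makes the pod
// unschedulable. All names below are hypothetical.
package main

import "fmt"

func merge(a, b map[string]bool) map[string]bool {
	if a == nil { // nil means "every node is still a candidate"
		return b
	}
	if b == nil {
		return a
	}
	out := map[string]bool{}
	for n := range a {
		if b[n] {
			out[n] = true
		}
	}
	return out
}

func main() {
	p1 := map[string]bool{"node-a": true, "node-b": true} // plugin 1 result
	p2 := map[string]bool{"node-b": true, "node-c": true} // plugin 2 result
	m := merge(merge(nil, p1), p2)
	fmt.Println(len(m) == 0, m) // false map[node-b:true]
}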
-func (f *frameworkImpl) RunPreFilterExtensionAddPod( - ctx context.Context, - state *framework.CycleState, - podToSchedule *v1.Pod, - podInfoToAdd *framework.PodInfo, - nodeInfo *framework.NodeInfo, -) (status *framework.Status) { - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "PreFilterExtension") - } - for _, pl := range f.preFilterPlugins { - if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) { - continue - } - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - status = f.runPreFilterExtensionAddPod(ctx, pl, state, podToSchedule, podInfoToAdd, nodeInfo) - if !status.IsSuccess() { - err := status.AsError() - logger.Error(err, "Plugin failed", "pod", klog.KObj(podToSchedule), "node", klog.KObj(nodeInfo.Node()), "operation", "addPod", "plugin", pl.Name()) - return framework.AsStatus(fmt.Errorf("running AddPod on PreFilter plugin %q: %w", pl.Name(), err)) - } - } - - return nil -} - -func (f *frameworkImpl) runPreFilterExtensionAddPod(ctx context.Context, pl framework.PreFilterPlugin, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podInfoToAdd, nodeInfo) - } - startTime := time.Now() - status := pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podInfoToAdd, nodeInfo) - f.metricsRecorder.ObservePluginDurationAsync(metrics.PreFilterExtensionAddPod, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured -// PreFilter plugins. It returns directly if any of the plugins return any -// status other than Success. 
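// Illustrative sketch (not part of the vendored file): the run* helpers around
// this point all share one pattern — call the plugin directly when per-plugin
// metrics are off, otherwise time the call and record the duration. A minimal
// generic version of that wrapper, with hypothetical names:
package main

import (
	"fmt"
	"time"
)

func timed(record bool, observe func(time.Duration), fn func() error) error {
	if !record {
		return fn() // fast path: no timing overhead
	}
	start := time.Now()
	err := fn()
	observe(time.Since(start))
	return err
}

func main() {
	err := timed(true,
		func(d time.Duration) { fmt.Println("observed a duration:", d > 0) },
		func() error { time.Sleep(time.Millisecond); return nil },
	)
	fmt.Println(err)
}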
-func (f *frameworkImpl) RunPreFilterExtensionRemovePod( - ctx context.Context, - state *framework.CycleState, - podToSchedule *v1.Pod, - podInfoToRemove *framework.PodInfo, - nodeInfo *framework.NodeInfo, -) (status *framework.Status) { - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "PreFilterExtension") - } - for _, pl := range f.preFilterPlugins { - if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) { - continue - } - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - status = f.runPreFilterExtensionRemovePod(ctx, pl, state, podToSchedule, podInfoToRemove, nodeInfo) - if !status.IsSuccess() { - err := status.AsError() - logger.Error(err, "Plugin failed", "node", klog.KObj(nodeInfo.Node()), "operation", "removePod", "plugin", pl.Name(), "pod", klog.KObj(podToSchedule)) - return framework.AsStatus(fmt.Errorf("running RemovePod on PreFilter plugin %q: %w", pl.Name(), err)) - } - } - - return nil -} - -func (f *frameworkImpl) runPreFilterExtensionRemovePod(ctx context.Context, pl framework.PreFilterPlugin, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podInfoToRemove, nodeInfo) - } - startTime := time.Now() - status := pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podInfoToRemove, nodeInfo) - f.metricsRecorder.ObservePluginDurationAsync(metrics.PreFilterExtensionRemovePod, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunFilterPlugins runs the set of configured Filter plugins for pod on -// the given node. If any of these plugins doesn't return "Success", the -// given node is not suitable for running pod. -// Meanwhile, the failure message and status are set for the given node. -func (f *frameworkImpl) RunFilterPlugins( - ctx context.Context, - state *framework.CycleState, - pod *v1.Pod, - nodeInfo *framework.NodeInfo, -) *framework.Status { - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "Filter") - } - - for _, pl := range f.filterPlugins { - if state.SkipFilterPlugins.Has(pl.Name()) { - continue - } - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - if status := f.runFilterPlugin(ctx, pl, state, pod, nodeInfo); !status.IsSuccess() { - if !status.IsRejected() { - // Filter plugins are not supposed to return any status other than - // Success or Unschedulable. 
- status = framework.AsStatus(fmt.Errorf("running %q filter plugin: %w", pl.Name(), status.AsError())) - } - status.SetPlugin(pl.Name()) - return status - } - } - - return nil -} - -func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.FilterPlugin, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return pl.Filter(ctx, state, pod, nodeInfo) - } - startTime := time.Now() - status := pl.Filter(ctx, state, pod, nodeInfo) - f.metricsRecorder.ObservePluginDurationAsync(metrics.Filter, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunPostFilterPlugins runs the set of configured PostFilter plugins until the first -// Success, Error or UnschedulableAndUnresolvable is met; otherwise continues to execute all plugins. -func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (_ *framework.PostFilterResult, status *framework.Status) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "PostFilter") - } - - // `result` records the last meaningful(non-noop) PostFilterResult. - var result *framework.PostFilterResult - var reasons []string - var rejectorPlugin string - for _, pl := range f.postFilterPlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - r, s := f.runPostFilterPlugin(ctx, pl, state, pod, filteredNodeStatusMap) - if s.IsSuccess() { - return r, s - } else if s.Code() == framework.UnschedulableAndUnresolvable { - return r, s.WithPlugin(pl.Name()) - } else if !s.IsRejected() { - // Any status other than Success, Unschedulable or UnschedulableAndUnresolvable is Error. - return nil, framework.AsStatus(s.AsError()).WithPlugin(pl.Name()) - } else if r != nil && r.Mode() != framework.ModeNoop { - result = r - } - - reasons = append(reasons, s.Reasons()...) - // Record the first failed plugin unless we proved that - // the latter is more relevant. - if len(rejectorPlugin) == 0 { - rejectorPlugin = pl.Name() - } - } - - return result, framework.NewStatus(framework.Unschedulable, reasons...).WithPlugin(rejectorPlugin) -} - -func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl framework.PostFilterPlugin, state *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) { - if !state.ShouldRecordPluginMetrics() { - return pl.PostFilter(ctx, state, pod, filteredNodeStatusMap) - } - startTime := time.Now() - r, s := pl.PostFilter(ctx, state, pod, filteredNodeStatusMap) - f.metricsRecorder.ObservePluginDurationAsync(metrics.PostFilter, pl.Name(), s.Code().String(), metrics.SinceInSeconds(startTime)) - return r, s -} - -// RunFilterPluginsWithNominatedPods runs the set of configured filter plugins -// for nominated pod on the given node. -// This function is called from two different places: Schedule and Preempt. 
-// When it is called from Schedule, we want to test whether the pod is -// schedulable on the node with all the existing pods on the node plus higher -// and equal priority pods nominated to run on the node. -// When it is called from Preempt, we should remove the victims of preemption -// and add the nominated pods. Removal of the victims is done by -// SelectVictimsOnNode(). Preempt removes victims from PreFilter state and -// NodeInfo before calling this function. -func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, state *framework.CycleState, pod *v1.Pod, info *framework.NodeInfo) *framework.Status { - var status *framework.Status - - podsAdded := false - // We run filters twice in some cases. If the node has greater or equal priority - // nominated pods, we run them when those pods are added to PreFilter state and nodeInfo. - // If all filters succeed in this pass, we run them again when these - // nominated pods are not added. This second pass is necessary because some - // filters such as inter-pod affinity may not pass without the nominated pods. - // If there are no nominated pods for the node or if the first run of the - // filters fail, we don't run the second pass. - // We consider only equal or higher priority pods in the first pass, because - // those are the current "pod" must yield to them and not take a space opened - // for running them. It is ok if the current "pod" take resources freed for - // lower priority pods. - // Requiring that the new pod is schedulable in both circumstances ensures that - // we are making a conservative decision: filters like resources and inter-pod - // anti-affinity are more likely to fail when the nominated pods are treated - // as running, while filters like pod affinity are more likely to fail when - // the nominated pods are treated as not running. We can't just assume the - // nominated pods are running because they are not running right now and in fact, - // they may end up getting scheduled to a different node. - logger := klog.FromContext(ctx) - logger = klog.LoggerWithName(logger, "FilterWithNominatedPods") - ctx = klog.NewContext(ctx, logger) - for i := 0; i < 2; i++ { - stateToUse := state - nodeInfoToUse := info - if i == 0 { - var err error - podsAdded, stateToUse, nodeInfoToUse, err = addGENominatedPods(ctx, f, pod, state, info) - if err != nil { - return framework.AsStatus(err) - } - } else if !podsAdded || !status.IsSuccess() { - break - } - - status = f.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse) - if !status.IsSuccess() && !status.IsRejected() { - return status - } - } - - return status -} - -// addGENominatedPods adds pods with equal or greater priority which are nominated -// to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState, -// 3) augmented nodeInfo. -func addGENominatedPods(ctx context.Context, fh framework.Handle, pod *v1.Pod, state *framework.CycleState, nodeInfo *framework.NodeInfo) (bool, *framework.CycleState, *framework.NodeInfo, error) { - if fh == nil { - // This may happen only in tests. 
- return false, state, nodeInfo, nil - } - nominatedPodInfos := fh.NominatedPodsForNode(nodeInfo.Node().Name) - if len(nominatedPodInfos) == 0 { - return false, state, nodeInfo, nil - } - nodeInfoOut := nodeInfo.Snapshot() - stateOut := state.Clone() - podsAdded := false - for _, pi := range nominatedPodInfos { - if corev1.PodPriority(pi.Pod) >= corev1.PodPriority(pod) && pi.Pod.UID != pod.UID { - nodeInfoOut.AddPodInfo(pi) - status := fh.RunPreFilterExtensionAddPod(ctx, stateOut, pod, pi, nodeInfoOut) - if !status.IsSuccess() { - return false, state, nodeInfo, status.AsError() - } - podsAdded = true - } - } - return podsAdded, stateOut, nodeInfoOut, nil -} - -// RunPreScorePlugins runs the set of configured pre-score plugins. If any -// of these plugins returns any status other than Success/Skip, the given pod is rejected. -// When it returns Skip status, other fields in status are just ignored, -// and coupled Score plugin will be skipped in this scheduling cycle. -func (f *frameworkImpl) RunPreScorePlugins( - ctx context.Context, - state *framework.CycleState, - pod *v1.Pod, - nodes []*framework.NodeInfo, -) (status *framework.Status) { - startTime := time.Now() - skipPlugins := sets.New[string]() - defer func() { - state.SkipScorePlugins = skipPlugins - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreScore, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "PreScore") - } - for _, pl := range f.preScorePlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - status = f.runPreScorePlugin(ctx, pl, state, pod, nodes) - if status.IsSkip() { - skipPlugins.Insert(pl.Name()) - continue - } - if !status.IsSuccess() { - return framework.AsStatus(fmt.Errorf("running PreScore plugin %q: %w", pl.Name(), status.AsError())) - } - } - return nil -} - -func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreScorePlugin, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return pl.PreScore(ctx, state, pod, nodes) - } - startTime := time.Now() - status := pl.PreScore(ctx, state, pod, nodes) - f.metricsRecorder.ObservePluginDurationAsync(metrics.PreScore, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunScorePlugins runs the set of configured scoring plugins. -// It returns a list that stores scores from each plugin and total score for each Node. -// It also returns *Status, which is set to non-success if any of the plugins returns -// a non-success status. 
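// Illustrative sketch (not part of the vendored file): the two-pass check
// described in RunFilterPluginsWithNominatedPods above — the pod must fit both
// with and without higher/equal-priority nominated pods treated as running.
// fits(bool) below is a hypothetical stand-in for running the filter plugins.
package main

import "fmt"

func schedulable(hasNominated bool, fits func(withNominated bool) bool) bool {
	if hasNominated {
		if !fits(true) { // pass 1: nominated pods counted as running
			return false
		}
	}
	return fits(false) // pass 2 (or the only pass): without nominated pods
}

func main() {
	ok := schedulable(true, func(withNominated bool) bool {
		// Resource-style filters tend to be stricter with nominated pods
		// counted; pod-affinity-style filters stricter without them.
		return true
	})
	fmt.Println(ok)
}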
-func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (ns []framework.NodePluginScores, status *framework.Status) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Score, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - allNodePluginScores := make([]framework.NodePluginScores, len(nodes)) - numPlugins := len(f.scorePlugins) - plugins := make([]framework.ScorePlugin, 0, numPlugins) - pluginToNodeScores := make(map[string]framework.NodeScoreList, numPlugins) - for _, pl := range f.scorePlugins { - if state.SkipScorePlugins.Has(pl.Name()) { - continue - } - plugins = append(plugins, pl) - pluginToNodeScores[pl.Name()] = make(framework.NodeScoreList, len(nodes)) - } - ctx, cancel := context.WithCancel(ctx) - defer cancel() - errCh := parallelize.NewErrorChannel() - - if len(plugins) > 0 { - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "Score") - } - // Run Score method for each node in parallel. - f.Parallelizer().Until(ctx, len(nodes), func(index int) { - nodeInfo := nodes[index] - nodeName := nodeInfo.Node().Name - logger := logger - if verboseLogs { - logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName}) - } - for _, pl := range plugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - s, status := f.runScorePlugin(ctx, pl, state, pod, nodeInfo) - if !status.IsSuccess() { - err := fmt.Errorf("plugin %q failed with: %w", pl.Name(), status.AsError()) - errCh.SendErrorWithCancel(err, cancel) - return - } - pluginToNodeScores[pl.Name()][index] = framework.NodeScore{ - Name: nodeName, - Score: s, - } - } - }, metrics.Score) - if err := errCh.ReceiveError(); err != nil { - return nil, framework.AsStatus(fmt.Errorf("running Score plugins: %w", err)) - } - } - - // Run NormalizeScore method for each ScorePlugin in parallel. - f.Parallelizer().Until(ctx, len(plugins), func(index int) { - pl := plugins[index] - if pl.ScoreExtensions() == nil { - return - } - nodeScoreList := pluginToNodeScores[pl.Name()] - status := f.runScoreExtension(ctx, pl, state, pod, nodeScoreList) - if !status.IsSuccess() { - err := fmt.Errorf("plugin %q failed with: %w", pl.Name(), status.AsError()) - errCh.SendErrorWithCancel(err, cancel) - return - } - }, metrics.Score) - if err := errCh.ReceiveError(); err != nil { - return nil, framework.AsStatus(fmt.Errorf("running Normalize on Score plugins: %w", err)) - } - - // Apply score weight for each ScorePlugin in parallel, - // and then, build allNodePluginScores. 
- f.Parallelizer().Until(ctx, len(nodes), func(index int) { - nodePluginScores := framework.NodePluginScores{ - Name: nodes[index].Node().Name, - Scores: make([]framework.PluginScore, len(plugins)), - } - - for i, pl := range plugins { - weight := f.scorePluginWeight[pl.Name()] - nodeScoreList := pluginToNodeScores[pl.Name()] - score := nodeScoreList[index].Score - - if score > framework.MaxNodeScore || score < framework.MinNodeScore { - err := fmt.Errorf("plugin %q returns an invalid score %v, it should in the range of [%v, %v] after normalizing", pl.Name(), score, framework.MinNodeScore, framework.MaxNodeScore) - errCh.SendErrorWithCancel(err, cancel) - return - } - weightedScore := score * int64(weight) - nodePluginScores.Scores[i] = framework.PluginScore{ - Name: pl.Name(), - Score: weightedScore, - } - nodePluginScores.TotalScore += weightedScore - } - allNodePluginScores[index] = nodePluginScores - }, metrics.Score) - if err := errCh.ReceiveError(); err != nil { - return nil, framework.AsStatus(fmt.Errorf("applying score defaultWeights on Score plugins: %w", err)) - } - - return allNodePluginScores, nil -} - -func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePlugin, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - if !state.ShouldRecordPluginMetrics() { - return pl.Score(ctx, state, pod, nodeInfo) - } - startTime := time.Now() - s, status := pl.Score(ctx, state, pod, nodeInfo) - f.metricsRecorder.ObservePluginDurationAsync(metrics.Score, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return s, status -} - -func (f *frameworkImpl) runScoreExtension(ctx context.Context, pl framework.ScorePlugin, state *framework.CycleState, pod *v1.Pod, nodeScoreList framework.NodeScoreList) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList) - } - startTime := time.Now() - status := pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList) - f.metricsRecorder.ObservePluginDurationAsync(metrics.ScoreExtensionNormalize, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunPreBindPlugins runs the set of configured prebind plugins. It returns a -// failure (bool) if any of the plugins returns an error. It also returns an -// error containing the rejection message or the error occurred in the plugin. 
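// Illustrative sketch (not part of the vendored file): the weighting step
// above multiplies each normalized per-node score by the plugin's weight and
// sums it into a node total, rejecting out-of-range scores. The [0,100] bounds
// below mirror MinNodeScore/MaxNodeScore; everything else is hypothetical.
package main

import "fmt"

const (
	minNodeScore int64 = 0
	maxNodeScore int64 = 100
)

func totalScore(scores, weights map[string]int64) (int64, error) {
	var total int64
	for plugin, s := range scores {
		if s < minNodeScore || s > maxNodeScore {
			return 0, fmt.Errorf("plugin %q returned out-of-range score %d", plugin, s)
		}
		total += s * weights[plugin]
	}
	return total, nil
}

func main() {
	t, err := totalScore(
		map[string]int64{"NodeResourcesFit": 80, "ImageLocality": 40},
		map[string]int64{"NodeResourcesFit": 1, "ImageLocality": 2},
	)
	fmt.Println(t, err) // 160 <nil>
}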
-func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreBind, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "PreBind") - logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName}) - } - for _, pl := range f.preBindPlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - status = f.runPreBindPlugin(ctx, pl, state, pod, nodeName) - if !status.IsSuccess() { - if status.IsRejected() { - logger.V(4).Info("Pod rejected by PreBind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message()) - status.SetPlugin(pl.Name()) - return status - } - err := status.AsError() - logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName) - return framework.AsStatus(fmt.Errorf("running PreBind plugin %q: %w", pl.Name(), err)) - } - } - return nil -} - -func (f *frameworkImpl) runPreBindPlugin(ctx context.Context, pl framework.PreBindPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return pl.PreBind(ctx, state, pod, nodeName) - } - startTime := time.Now() - status := pl.PreBind(ctx, state, pod, nodeName) - f.metricsRecorder.ObservePluginDurationAsync(metrics.PreBind, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunBindPlugins runs the set of configured bind plugins until one returns a non `Skip` status. 
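// Illustrative sketch (not part of the vendored file): RunBindPlugins below is
// a first-responder chain — each bind plugin may skip to pass the pod along,
// and the first non-skip result ends the loop. Hypothetical names throughout.
package main

import "fmt"

type binder func(pod string) (handled bool, err error)

func bindChain(pod string, binders []binder) error {
	for _, b := range binders {
		handled, err := b(pod)
		if err != nil {
			return err
		}
		if handled {
			return nil // the first plugin that doesn't skip wins
		}
	}
	return fmt.Errorf("no bind plugin handled pod %q", pod)
}

func main() {
	err := bindChain("nginx-0", []binder{
		func(string) (bool, error) { return false, nil }, // skips
		func(p string) (bool, error) { fmt.Println("bound", p); return true, nil },
	})
	fmt.Println(err)
}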
-func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Bind, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - if len(f.bindPlugins) == 0 { - return framework.NewStatus(framework.Skip, "") - } - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "Bind") - } - for _, pl := range f.bindPlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - status = f.runBindPlugin(ctx, pl, state, pod, nodeName) - if status.IsSkip() { - continue - } - if !status.IsSuccess() { - if status.IsRejected() { - logger.V(4).Info("Pod rejected by Bind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message()) - status.SetPlugin(pl.Name()) - return status - } - err := status.AsError() - logger.Error(err, "Plugin Failed", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName) - return framework.AsStatus(fmt.Errorf("running Bind plugin %q: %w", pl.Name(), err)) - } - return status - } - return status -} - -func (f *frameworkImpl) runBindPlugin(ctx context.Context, bp framework.BindPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return bp.Bind(ctx, state, pod, nodeName) - } - startTime := time.Now() - status := bp.Bind(ctx, state, pod, nodeName) - f.metricsRecorder.ObservePluginDurationAsync(metrics.Bind, bp.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunPostBindPlugins runs the set of configured postbind plugins. -func (f *frameworkImpl) RunPostBindPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostBind, framework.Success.String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "PostBind") - } - for _, pl := range f.postBindPlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - f.runPostBindPlugin(ctx, pl, state, pod, nodeName) - } -} - -func (f *frameworkImpl) runPostBindPlugin(ctx context.Context, pl framework.PostBindPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) { - if !state.ShouldRecordPluginMetrics() { - pl.PostBind(ctx, state, pod, nodeName) - return - } - startTime := time.Now() - pl.PostBind(ctx, state, pod, nodeName) - f.metricsRecorder.ObservePluginDurationAsync(metrics.PostBind, pl.Name(), framework.Success.String(), metrics.SinceInSeconds(startTime)) -} - -// RunReservePluginsReserve runs the Reserve method in the set of configured -// reserve plugins. If any of these plugins returns an error, it does not -// continue running the remaining ones and returns the error. In such a case, -// the pod will not be scheduled and the caller will be expected to call -// RunReservePluginsUnreserve. 
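// Illustrative sketch (not part of the vendored file): the Reserve/Unreserve
// contract described just above and implemented below is a rollback protocol —
// reserve plugins run in order, and on failure unreserve runs in *reverse*
// order, like deferred cleanup. A toy version with hypothetical steps:
package main

import "fmt"

func reserveAll(steps []string, failAt int) {
	done := []string{}
	for i, s := range steps {
		if i == failAt {
			// Roll back in reverse order of the successful reservations.
			for j := len(done) - 1; j >= 0; j-- {
				fmt.Println("unreserve", done[j])
			}
			return
		}
		fmt.Println("reserve", s)
		done = append(done, s)
	}
}

func main() {
	reserveAll([]string{"volumes", "ports", "gpu"}, 2)
	// reserve volumes, reserve ports, unreserve ports, unreserve volumes
}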
-func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Reserve, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "Reserve") - logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName}) - } - for _, pl := range f.reservePlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - status = f.runReservePluginReserve(ctx, pl, state, pod, nodeName) - if !status.IsSuccess() { - if status.IsRejected() { - logger.V(4).Info("Pod rejected by plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message()) - status.SetPlugin(pl.Name()) - return status - } - err := status.AsError() - logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod)) - return framework.AsStatus(fmt.Errorf("running Reserve plugin %q: %w", pl.Name(), err)) - } - } - return nil -} - -func (f *frameworkImpl) runReservePluginReserve(ctx context.Context, pl framework.ReservePlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status { - if !state.ShouldRecordPluginMetrics() { - return pl.Reserve(ctx, state, pod, nodeName) - } - startTime := time.Now() - status := pl.Reserve(ctx, state, pod, nodeName) - f.metricsRecorder.ObservePluginDurationAsync(metrics.Reserve, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status -} - -// RunReservePluginsUnreserve runs the Unreserve method in the set of -// configured reserve plugins. -func (f *frameworkImpl) RunReservePluginsUnreserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Unreserve, framework.Success.String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - // Execute the Unreserve operation of each reserve plugin in the - // *reverse* order in which the Reserve operation was executed. - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "Unreserve") - logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName}) - } - for i := len(f.reservePlugins) - 1; i >= 0; i-- { - pl := f.reservePlugins[i] - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - f.runReservePluginUnreserve(ctx, pl, state, pod, nodeName) - } -} - -func (f *frameworkImpl) runReservePluginUnreserve(ctx context.Context, pl framework.ReservePlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) { - if !state.ShouldRecordPluginMetrics() { - pl.Unreserve(ctx, state, pod, nodeName) - return - } - startTime := time.Now() - pl.Unreserve(ctx, state, pod, nodeName) - f.metricsRecorder.ObservePluginDurationAsync(metrics.Unreserve, pl.Name(), framework.Success.String(), metrics.SinceInSeconds(startTime)) -} - -// RunPermitPlugins runs the set of configured permit plugins. If any of these -// plugins returns a status other than "Success" or "Wait", it does not continue -// running the remaining plugins and returns an error. 
Otherwise, if any of the -// plugins returns "Wait", then this function will create and add waiting pod -// to a map of currently waiting pods and return status with "Wait" code. -// Pod will remain waiting pod for the minimum duration returned by the permit plugins. -func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) { - startTime := time.Now() - defer func() { - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Permit, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) - }() - pluginsWaitTime := make(map[string]time.Duration) - statusCode := framework.Success - logger := klog.FromContext(ctx) - verboseLogs := logger.V(4).Enabled() - if verboseLogs { - logger = klog.LoggerWithName(logger, "Permit") - logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName}) - } - for _, pl := range f.permitPlugins { - ctx := ctx - if verboseLogs { - logger := klog.LoggerWithName(logger, pl.Name()) - ctx = klog.NewContext(ctx, logger) - } - status, timeout := f.runPermitPlugin(ctx, pl, state, pod, nodeName) - if !status.IsSuccess() { - if status.IsRejected() { - logger.V(4).Info("Pod rejected by plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message()) - return status.WithPlugin(pl.Name()) - } - if status.IsWait() { - // Not allowed to be greater than maxTimeout. - if timeout > maxTimeout { - timeout = maxTimeout - } - pluginsWaitTime[pl.Name()] = timeout - statusCode = framework.Wait - } else { - err := status.AsError() - logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod)) - return framework.AsStatus(fmt.Errorf("running Permit plugin %q: %w", pl.Name(), err)).WithPlugin(pl.Name()) - } - } - } - if statusCode == framework.Wait { - waitingPod := newWaitingPod(pod, pluginsWaitTime) - f.waitingPods.add(waitingPod) - msg := fmt.Sprintf("one or more plugins asked to wait and no plugin rejected pod %q", pod.Name) - logger.V(4).Info("One or more plugins asked to wait and no plugin rejected pod", "pod", klog.KObj(pod)) - return framework.NewStatus(framework.Wait, msg) - } - return nil -} - -func (f *frameworkImpl) runPermitPlugin(ctx context.Context, pl framework.PermitPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) { - if !state.ShouldRecordPluginMetrics() { - return pl.Permit(ctx, state, pod, nodeName) - } - startTime := time.Now() - status, timeout := pl.Permit(ctx, state, pod, nodeName) - f.metricsRecorder.ObservePluginDurationAsync(metrics.Permit, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime)) - return status, timeout -} - -// WaitOnPermit will block, if the pod is a waiting pod, until the waiting pod is rejected or allowed. 
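// Illustrative sketch (not part of the vendored file): the permit flow around
// this point — RunPermitPlugins above registers a clamped wait timeout, and
// WaitOnPermit below blocks until the pod is allowed, rejected, or timed out.
// The 15-minute cap and all names here are assumptions for illustration only.
package main

import (
	"fmt"
	"time"
)

const maxTimeout = 15 * time.Minute // assumed cap, mirroring the clamp above

func waitOnPermit(requested time.Duration, allow <-chan struct{}) error {
	if requested > maxTimeout {
		requested = maxTimeout
	}
	select {
	case <-allow:
		return nil
	case <-time.After(requested):
		return fmt.Errorf("rejected due to timeout after waiting %v", requested)
	}
}

func main() {
	allow := make(chan struct{}, 1)
	allow <- struct{}{} // a permit plugin allows immediately
	fmt.Println(waitOnPermit(time.Second, allow)) // <nil>
}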
-func (f *frameworkImpl) WaitOnPermit(ctx context.Context, pod *v1.Pod) *framework.Status { - waitingPod := f.waitingPods.get(pod.UID) - if waitingPod == nil { - return nil - } - defer f.waitingPods.remove(pod.UID) - - logger := klog.FromContext(ctx) - logger.V(4).Info("Pod waiting on permit", "pod", klog.KObj(pod)) - - startTime := time.Now() - s := <-waitingPod.s - metrics.PermitWaitDuration.WithLabelValues(s.Code().String()).Observe(metrics.SinceInSeconds(startTime)) - - if !s.IsSuccess() { - if s.IsRejected() { - logger.V(4).Info("Pod rejected while waiting on permit", "pod", klog.KObj(pod), "status", s.Message()) - return s - } - err := s.AsError() - logger.Error(err, "Failed waiting on permit for pod", "pod", klog.KObj(pod)) - return framework.AsStatus(fmt.Errorf("waiting on permit for pod: %w", err)).WithPlugin(s.Plugin()) - } - return nil -} - -// SnapshotSharedLister returns the scheduler's SharedLister of the latest NodeInfo -// snapshot. The snapshot is taken at the beginning of a scheduling cycle and remains -// unchanged until a pod finishes "Reserve". There is no guarantee that the information -// remains unchanged after "Reserve". -func (f *frameworkImpl) SnapshotSharedLister() framework.SharedLister { - return f.snapshotSharedLister -} - -// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map. -func (f *frameworkImpl) IterateOverWaitingPods(callback func(framework.WaitingPod)) { - f.waitingPods.iterate(callback) -} - -// GetWaitingPod returns a reference to a WaitingPod given its UID. -func (f *frameworkImpl) GetWaitingPod(uid types.UID) framework.WaitingPod { - if wp := f.waitingPods.get(uid); wp != nil { - return wp - } - return nil // Returning nil instead of *waitingPod(nil). -} - -// RejectWaitingPod rejects a WaitingPod given its UID. -// The returned value indicates if the given pod is waiting or not. -func (f *frameworkImpl) RejectWaitingPod(uid types.UID) bool { - if waitingPod := f.waitingPods.get(uid); waitingPod != nil { - waitingPod.Reject("", "removed") - return true - } - return false -} - -// HasFilterPlugins returns true if at least one filter plugin is defined. -func (f *frameworkImpl) HasFilterPlugins() bool { - return len(f.filterPlugins) > 0 -} - -// HasPostFilterPlugins returns true if at least one postFilter plugin is defined. -func (f *frameworkImpl) HasPostFilterPlugins() bool { - return len(f.postFilterPlugins) > 0 -} - -// HasScorePlugins returns true if at least one score plugin is defined. -func (f *frameworkImpl) HasScorePlugins() bool { - return len(f.scorePlugins) > 0 -} - -// ListPlugins returns a map of extension point name to plugin names configured at each extension -// point. Returns nil if no plugins where configured. -func (f *frameworkImpl) ListPlugins() *config.Plugins { - m := config.Plugins{} - - for _, e := range f.getExtensionPoints(&m) { - plugins := reflect.ValueOf(e.slicePtr).Elem() - extName := plugins.Type().Elem().Name() - var cfgs []config.Plugin - for i := 0; i < plugins.Len(); i++ { - name := plugins.Index(i).Interface().(framework.Plugin).Name() - p := config.Plugin{Name: name} - if extName == "ScorePlugin" { - // Weights apply only to score plugins. - p.Weight = int32(f.scorePluginWeight[name]) - } - cfgs = append(cfgs, p) - } - if len(cfgs) > 0 { - e.plugins.Enabled = cfgs - } - } - return &m -} - -// ClientSet returns a kubernetes clientset. -func (f *frameworkImpl) ClientSet() clientset.Interface { - return f.clientSet -} - -// KubeConfig returns a kubernetes config. 
-func (f *frameworkImpl) KubeConfig() *restclient.Config { - return f.kubeConfig -} - -// EventRecorder returns an event recorder. -func (f *frameworkImpl) EventRecorder() events.EventRecorder { - return f.eventRecorder -} - -// SharedInformerFactory returns a shared informer factory. -func (f *frameworkImpl) SharedInformerFactory() informers.SharedInformerFactory { - return f.informerFactory -} - -// SharedDRAManager returns the SharedDRAManager of the framework. -func (f *frameworkImpl) SharedDRAManager() framework.SharedDRAManager { - return f.sharedDRAManager -} - -func (f *frameworkImpl) pluginsNeeded(plugins *config.Plugins) sets.Set[string] { - pgSet := sets.Set[string]{} - - if plugins == nil { - return pgSet - } - - find := func(pgs *config.PluginSet) { - for _, pg := range pgs.Enabled { - pgSet.Insert(pg.Name) - } - } - - for _, e := range f.getExtensionPoints(plugins) { - find(e.plugins) - } - // Parse MultiPoint separately since they are not returned by f.getExtensionPoints() - find(&plugins.MultiPoint) - - return pgSet -} - -// ProfileName returns the profile name associated to this framework. -func (f *frameworkImpl) ProfileName() string { - return f.profileName -} - -// PercentageOfNodesToScore returns percentageOfNodesToScore associated to a profile. -func (f *frameworkImpl) PercentageOfNodesToScore() *int32 { - return f.percentageOfNodesToScore -} - -// Parallelizer returns a parallelizer holding parallelism for scheduler. -func (f *frameworkImpl) Parallelizer() parallelize.Parallelizer { - return f.parallelizer -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go deleted file mode 100644 index 04e9004c8c6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package runtime - -import ( - "context" - - v1 "k8s.io/api/core/v1" - compbasemetrics "k8s.io/component-base/metrics" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -type instrumentedFilterPlugin struct { - framework.FilterPlugin - - metric compbasemetrics.CounterMetric -} - -var _ framework.FilterPlugin = &instrumentedFilterPlugin{} - -func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - p.metric.Inc() - return p.FilterPlugin.Filter(ctx, state, pod, nodeInfo) -} - -type instrumentedPreFilterPlugin struct { - framework.PreFilterPlugin - - metric compbasemetrics.CounterMetric -} - -var _ framework.PreFilterPlugin = &instrumentedPreFilterPlugin{} - -func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - result, status := p.PreFilterPlugin.PreFilter(ctx, state, pod) - if !status.IsSkip() { - p.metric.Inc() - } - return result, status -} - -type instrumentedPreScorePlugin struct { - framework.PreScorePlugin - - metric compbasemetrics.CounterMetric -} - -var _ framework.PreScorePlugin = &instrumentedPreScorePlugin{} - -func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { - status := p.PreScorePlugin.PreScore(ctx, state, pod, nodes) - if !status.IsSkip() { - p.metric.Inc() - } - return status -} - -type instrumentedScorePlugin struct { - framework.ScorePlugin - - metric compbasemetrics.CounterMetric -} - -var _ framework.ScorePlugin = &instrumentedScorePlugin{} - -func (p *instrumentedScorePlugin) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) { - p.metric.Inc() - return p.ScorePlugin.Score(ctx, state, pod, nodeInfo) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/registry.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/registry.go deleted file mode 100644 index b6fca3c29cd..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/registry.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/kubernetes/pkg/scheduler/framework" - plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "sigs.k8s.io/yaml" -) - -// PluginFactory is a function that builds a plugin. -type PluginFactory = func(ctx context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error) - -// PluginFactoryWithFts is a function that builds a plugin with certain feature gates. 
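// Illustrative sketch (not part of the vendored file): the instrumented*Plugin
// wrappers above use Go interface embedding as a decorator — the wrapper
// satisfies the interface via the embedded value and intercepts only the one
// method it cares about. A generic version with hypothetical names:
package main

import "fmt"

type Greeter interface{ Greet(name string) string }

type plain struct{}

func (plain) Greet(name string) string { return "hello " + name }

// counted decorates any Greeter, bumping a counter per call.
type counted struct {
	Greeter
	calls int
}

func (c *counted) Greet(name string) string {
	c.calls++ // intercept, then delegate to the embedded implementation
	return c.Greeter.Greet(name)
}

func main() {
	g := &counted{Greeter: plain{}}
	fmt.Println(g.Greet("world"), g.calls) // hello world 1
}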
-type PluginFactoryWithFts func(context.Context, runtime.Object, framework.Handle, plfeature.Features) (framework.Plugin, error) - -// FactoryAdapter can be used to inject feature gates for a plugin that needs -// them when the caller expects the older PluginFactory method. -func FactoryAdapter(fts plfeature.Features, withFts PluginFactoryWithFts) PluginFactory { - return func(ctx context.Context, plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) { - return withFts(ctx, plArgs, fh, fts) - } -} - -// DecodeInto decodes configuration whose type is *runtime.Unknown to the interface into. -func DecodeInto(obj runtime.Object, into interface{}) error { - if obj == nil { - return nil - } - configuration, ok := obj.(*runtime.Unknown) - if !ok { - return fmt.Errorf("want args of type runtime.Unknown, got %T", obj) - } - if configuration.Raw == nil { - return nil - } - - switch configuration.ContentType { - // If ContentType is empty, it means ContentTypeJSON by default. - case runtime.ContentTypeJSON, "": - return json.Unmarshal(configuration.Raw, into) - case runtime.ContentTypeYAML: - return yaml.Unmarshal(configuration.Raw, into) - default: - return fmt.Errorf("not supported content type %s", configuration.ContentType) - } -} - -// Registry is a collection of all available plugins. The framework uses a -// registry to enable and initialize configured plugins. -// All plugins must be in the registry before initializing the framework. -type Registry map[string]PluginFactory - -// Register adds a new plugin to the registry. If a plugin with the same name -// exists, it returns an error. -func (r Registry) Register(name string, factory PluginFactory) error { - if _, ok := r[name]; ok { - return fmt.Errorf("a plugin named %v already exists", name) - } - r[name] = factory - return nil -} - -// Unregister removes an existing plugin from the registry. If no plugin with -// the provided name exists, it returns an error. -func (r Registry) Unregister(name string) error { - if _, ok := r[name]; !ok { - return fmt.Errorf("no plugin named %v exists", name) - } - delete(r, name) - return nil -} - -// Merge merges the provided registry to the current one. -func (r Registry) Merge(in Registry) error { - for name, factory := range in { - if err := r.Register(name, factory); err != nil { - return err - } - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/waiting_pods_map.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/waiting_pods_map.go deleted file mode 100644 index 3d40d4a52a1..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/waiting_pods_map.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/scheduler/framework" -) - -// waitingPodsMap a thread-safe map used to maintain pods waiting in the permit phase. 
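// Illustrative sketch (not part of the vendored file): the Registry above is a
// name-to-factory map where duplicate registration is an error. A stripped-down
// version of the same pattern, with a hypothetical Factory type:
package main

import "fmt"

type Factory func() any

type Registry map[string]Factory

func (r Registry) Register(name string, f Factory) error {
	if _, ok := r[name]; ok {
		return fmt.Errorf("a plugin named %v already exists", name)
	}
	r[name] = f
	return nil
}

func main() {
	r := Registry{}
	fmt.Println(r.Register("NodeName", func() any { return struct{}{} })) // <nil>
	fmt.Println(r.Register("NodeName", func() any { return struct{}{} })) // duplicate -> error
}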
-type waitingPodsMap struct { - pods map[types.UID]*waitingPod - mu sync.RWMutex -} - -// NewWaitingPodsMap returns a new waitingPodsMap. -func NewWaitingPodsMap() *waitingPodsMap { - return &waitingPodsMap{ - pods: make(map[types.UID]*waitingPod), - } -} - -// add a new WaitingPod to the map. -func (m *waitingPodsMap) add(wp *waitingPod) { - m.mu.Lock() - defer m.mu.Unlock() - m.pods[wp.GetPod().UID] = wp -} - -// remove a WaitingPod from the map. -func (m *waitingPodsMap) remove(uid types.UID) { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.pods, uid) -} - -// get a WaitingPod from the map. -func (m *waitingPodsMap) get(uid types.UID) *waitingPod { - m.mu.RLock() - defer m.mu.RUnlock() - return m.pods[uid] -} - -// iterate acquires a read lock and iterates over the WaitingPods map. -func (m *waitingPodsMap) iterate(callback func(framework.WaitingPod)) { - m.mu.RLock() - defer m.mu.RUnlock() - for _, v := range m.pods { - callback(v) - } -} - -// waitingPod represents a pod waiting in the permit phase. -type waitingPod struct { - pod *v1.Pod - pendingPlugins map[string]*time.Timer - s chan *framework.Status - mu sync.RWMutex -} - -var _ framework.WaitingPod = &waitingPod{} - -// newWaitingPod returns a new waitingPod instance. -func newWaitingPod(pod *v1.Pod, pluginsMaxWaitTime map[string]time.Duration) *waitingPod { - wp := &waitingPod{ - pod: pod, - // Allow() and Reject() calls are non-blocking. This property is guaranteed - // by using non-blocking send to this channel. This channel has a buffer of size 1 - // to ensure that non-blocking send will not be ignored - possible situation when - // receiving from this channel happens after non-blocking send. - s: make(chan *framework.Status, 1), - } - - wp.pendingPlugins = make(map[string]*time.Timer, len(pluginsMaxWaitTime)) - // The time.AfterFunc calls wp.Reject which iterates through pendingPlugins map. Acquire the - // lock here so that time.AfterFunc can only execute after newWaitingPod finishes. - wp.mu.Lock() - defer wp.mu.Unlock() - for k, v := range pluginsMaxWaitTime { - plugin, waitTime := k, v - wp.pendingPlugins[plugin] = time.AfterFunc(waitTime, func() { - msg := fmt.Sprintf("rejected due to timeout after waiting %v at plugin %v", - waitTime, plugin) - wp.Reject(plugin, msg) - }) - } - - return wp -} - -// GetPod returns a reference to the waiting pod. -func (w *waitingPod) GetPod() *v1.Pod { - return w.pod -} - -// GetPendingPlugins returns a list of pending permit plugin's name. -func (w *waitingPod) GetPendingPlugins() []string { - w.mu.RLock() - defer w.mu.RUnlock() - plugins := make([]string, 0, len(w.pendingPlugins)) - for p := range w.pendingPlugins { - plugins = append(plugins, p) - } - - return plugins -} - -// Allow declares the waiting pod is allowed to be scheduled by plugin pluginName. -// If this is the last remaining plugin to allow, then a success signal is delivered -// to unblock the pod. -func (w *waitingPod) Allow(pluginName string) { - w.mu.Lock() - defer w.mu.Unlock() - if timer, exist := w.pendingPlugins[pluginName]; exist { - timer.Stop() - delete(w.pendingPlugins, pluginName) - } - - // Only signal success status after all plugins have allowed - if len(w.pendingPlugins) != 0 { - return - } - - // The select clause works as a non-blocking send. - // If there is no receiver, it's a no-op (default case). - select { - case w.s <- framework.NewStatus(framework.Success, ""): - default: - } -} - -// Reject declares the waiting pod unschedulable. 
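// Illustrative sketch (not part of the vendored file): the signalling idiom
// used by Allow above and Reject below — a buffered channel of size 1 plus
// select/default makes the send non-blocking, so the first result wins and
// later signals are dropped if nobody is receiving yet.
package main

import "fmt"

func signal(ch chan string, msg string) {
	select {
	case ch <- msg: // delivered (or parked in the one-slot buffer)
	default: // already signalled; drop instead of blocking
	}
}

func main() {
	ch := make(chan string, 1)
	signal(ch, "allowed")
	signal(ch, "rejected") // dropped: the buffer already holds "allowed"
	fmt.Println(<-ch)      // allowed
}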
-func (w *waitingPod) Reject(pluginName, msg string) { - w.mu.RLock() - defer w.mu.RUnlock() - for _, timer := range w.pendingPlugins { - timer.Stop() - } - - // The select clause works as a non-blocking send. - // If there is no receiver, it's a no-op (default case). - select { - case w.s <- framework.NewStatus(framework.Unschedulable, msg).WithPlugin(pluginName): - default: - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go deleted file mode 100644 index fbc02a223a6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go +++ /dev/null @@ -1,1336 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "errors" - "fmt" - "sort" - "strings" - "sync/atomic" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - - "k8s.io/apimachinery/pkg/api/resource" - resourcehelper "k8s.io/component-helpers/resource" - "k8s.io/kubernetes/pkg/features" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" -) - -var generation int64 - -// ActionType is an integer to represent one type of resource change. -// Different ActionTypes can be bit-wised to compose new semantics. -type ActionType int64 - -// Constants for ActionTypes. -// CAUTION for contributors: When you add a new ActionType, you must update the following: -// - The list of basic, podOnly, and nodeOnly. -// - String() method. -const ( - Add ActionType = 1 << iota - Delete - - // UpdateNodeXYZ is only applicable for Node events. - // If you use UpdateNodeXYZ, - // your plugin's QueueingHint is only executed for the specific sub-Update event. - // It's better to narrow down the scope of the event by using them instead of just using Update event - // for better performance in requeueing. - UpdateNodeAllocatable - UpdateNodeLabel - // UpdateNodeTaint is an update for node's taints or node.Spec.Unschedulable. - UpdateNodeTaint - UpdateNodeCondition - UpdateNodeAnnotation - - // UpdatePodXYZ is only applicable for Pod events. - // If you use UpdatePodXYZ, - // your plugin's QueueingHint is only executed for the specific sub-Update event. - // It's better to narrow down the scope of the event by using them instead of Update event - // for better performance in requeueing. - UpdatePodLabel - // UpdatePodScaleDown is an update for pod's scale down (i.e., any resource request is reduced). - UpdatePodScaleDown - // UpdatePodToleration is an addition for pod's tolerations. - // (Due to API validation, we can add, but cannot modify or remove tolerations.) - UpdatePodToleration - // UpdatePodSchedulingGatesEliminated is an update for pod's scheduling gates, which eliminates all scheduling gates in the Pod. 
- UpdatePodSchedulingGatesEliminated - // UpdatePodGeneratedResourceClaim is an update of the list of ResourceClaims generated for the pod. - // Depends on the DynamicResourceAllocation feature gate. - UpdatePodGeneratedResourceClaim - - // updatePodOther is a update for pod's other fields. - // It's used only for the internal event handling, and thus unexported. - updatePodOther - - All ActionType = 1<. . ." -func (f *FitError) Error() string { - reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+":", f.NumAllNodes) - preFilterMsg := f.Diagnosis.PreFilterMsg - if preFilterMsg != "" { - // PreFilter plugin returns unschedulable. - // Add the messages from PreFilter plugins to reasonMsg. - reasonMsg += fmt.Sprintf(" %v.", preFilterMsg) - } - - if preFilterMsg == "" { - // the scheduling cycle went through PreFilter extension point successfully. - // - // When the prefilter plugin returns unschedulable, - // the scheduling framework inserts the same unschedulable status to all nodes in NodeToStatusMap. - // So, we shouldn't add the message from NodeToStatusMap when the PreFilter failed. - // Otherwise, we will have duplicated reasons in the error message. - reasons := make(map[string]int) - f.Diagnosis.NodeToStatus.ForEachExplicitNode(func(_ string, status *Status) { - for _, reason := range status.Reasons() { - reasons[reason]++ - } - }) - if f.Diagnosis.NodeToStatus.Len() < f.NumAllNodes { - // Adding predefined reasons for nodes that are absent in NodeToStatusMap - for _, reason := range f.Diagnosis.NodeToStatus.AbsentNodesStatus().Reasons() { - reasons[reason] += f.NumAllNodes - f.Diagnosis.NodeToStatus.Len() - } - } - - sortReasonsHistogram := func() []string { - var reasonStrings []string - for k, v := range reasons { - reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k)) - } - sort.Strings(reasonStrings) - return reasonStrings - } - sortedFilterMsg := sortReasonsHistogram() - if len(sortedFilterMsg) != 0 { - reasonMsg += fmt.Sprintf(" %v.", strings.Join(sortedFilterMsg, ", ")) - } - } - - // Add the messages from PostFilter plugins to reasonMsg. - // We can add this message regardless of whether the scheduling cycle fails at PreFilter or Filter - // since we may run PostFilter (if enabled) in both cases. - postFilterMsg := f.Diagnosis.PostFilterMsg - if postFilterMsg != "" { - reasonMsg += fmt.Sprintf(" %v", postFilterMsg) - } - return reasonMsg -} - -func newAffinityTerm(pod *v1.Pod, term *v1.PodAffinityTerm) (*AffinityTerm, error) { - selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) - if err != nil { - return nil, err - } - - namespaces := getNamespacesFromPodAffinityTerm(pod, term) - nsSelector, err := metav1.LabelSelectorAsSelector(term.NamespaceSelector) - if err != nil { - return nil, err - } - - return &AffinityTerm{Namespaces: namespaces, Selector: selector, TopologyKey: term.TopologyKey, NamespaceSelector: nsSelector}, nil -} - -// GetAffinityTerms receives a Pod and affinity terms and returns the namespaces and -// selectors of the terms. -func GetAffinityTerms(pod *v1.Pod, v1Terms []v1.PodAffinityTerm) ([]AffinityTerm, error) { - if v1Terms == nil { - return nil, nil - } - - var terms []AffinityTerm - for i := range v1Terms { - t, err := newAffinityTerm(pod, &v1Terms[i]) - if err != nil { - // We get here if the label selector failed to process - return nil, err - } - terms = append(terms, *t) - } - return terms, nil -} - -// getWeightedAffinityTerms returns the list of processed affinity terms. 
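// Illustrative sketch (not part of the vendored file): FitError.Error above
// aggregates identical per-node failure reasons into a sorted
// "<count> <reason>" histogram. A toy version with hypothetical names:
package main

import (
	"fmt"
	"sort"
	"strings"
)

func summarize(nodeReasons map[string][]string) string {
	counts := map[string]int{}
	for _, reasons := range nodeReasons {
		for _, r := range reasons {
			counts[r]++
		}
	}
	var parts []string
	for r, n := range counts {
		parts = append(parts, fmt.Sprintf("%v %v", n, r))
	}
	sort.Strings(parts)
	return strings.Join(parts, ", ")
}

func main() {
	fmt.Println(summarize(map[string][]string{
		"node-a": {"Insufficient cpu"},
		"node-b": {"Insufficient cpu", "node(s) had untolerated taint"},
	}))
	// 1 node(s) had untolerated taint, 2 Insufficient cpu
}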
-func getWeightedAffinityTerms(pod *v1.Pod, v1Terms []v1.WeightedPodAffinityTerm) ([]WeightedAffinityTerm, error) { - if v1Terms == nil { - return nil, nil - } - - var terms []WeightedAffinityTerm - for i := range v1Terms { - t, err := newAffinityTerm(pod, &v1Terms[i].PodAffinityTerm) - if err != nil { - // We get here if the label selector failed to process - return nil, err - } - terms = append(terms, WeightedAffinityTerm{AffinityTerm: *t, Weight: v1Terms[i].Weight}) - } - return terms, nil -} - -// NewPodInfo returns a new PodInfo. -func NewPodInfo(pod *v1.Pod) (*PodInfo, error) { - pInfo := &PodInfo{} - err := pInfo.Update(pod) - return pInfo, err -} - -func GetPodAffinityTerms(affinity *v1.Affinity) (terms []v1.PodAffinityTerm) { - if affinity != nil && affinity.PodAffinity != nil { - if len(affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { - terms = affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution - } - // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. - // if len(affinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { - // terms = append(terms, affinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution...) - // } - } - return terms -} - -func GetPodAntiAffinityTerms(affinity *v1.Affinity) (terms []v1.PodAffinityTerm) { - if affinity != nil && affinity.PodAntiAffinity != nil { - if len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { - terms = affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution - } - // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. - // if len(affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { - // terms = append(terms, affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...) - // } - } - return terms -} - -// returns a set of names according to the namespaces indicated in podAffinityTerm. -// If namespaces is empty it considers the given pod's namespace. -func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.Set[string] { - names := sets.Set[string]{} - if len(podAffinityTerm.Namespaces) == 0 && podAffinityTerm.NamespaceSelector == nil { - names.Insert(pod.Namespace) - } else { - names.Insert(podAffinityTerm.Namespaces...) - } - return names -} - -// ImageStateSummary provides summarized information about the state of an image. -type ImageStateSummary struct { - // Size of the image - Size int64 - // Used to track how many nodes have this image, it is computed from the Nodes field below - // during the execution of Snapshot. - NumNodes int - // A set of node names for nodes having this image present. This field is used for - // keeping track of the nodes during update/add/remove events. - Nodes sets.Set[string] -} - -// Snapshot returns a copy without Nodes field of ImageStateSummary -func (iss *ImageStateSummary) Snapshot() *ImageStateSummary { - return &ImageStateSummary{ - Size: iss.Size, - NumNodes: iss.Nodes.Len(), - } -} - -// NodeInfo is node level aggregated information. -type NodeInfo struct { - // Overall node information. - node *v1.Node - - // Pods running on the node. - Pods []*PodInfo - - // The subset of pods with affinity. - PodsWithAffinity []*PodInfo - - // The subset of pods with required anti-affinity. - PodsWithRequiredAntiAffinity []*PodInfo - - // Ports allocated on the node. 
- UsedPorts HostPortInfo - - // Total requested resources of all pods on this node. This includes assumed - // pods, which scheduler has sent for binding, but may not be scheduled yet. - Requested *Resource - // Total requested resources of all pods on this node with a minimum value - // applied to each container's CPU and memory requests. This does not reflect - // the actual resource requests for this node, but is used to avoid scheduling - // many zero-request pods onto one node. - NonZeroRequested *Resource - // We store allocatedResources (which is Node.Status.Allocatable.*) explicitly - // as int64, to avoid conversions and accessing map. - Allocatable *Resource - - // ImageStates holds the entry of an image if and only if this image is on the node. The entry can be used for - // checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image - // state information. - ImageStates map[string]*ImageStateSummary - - // PVCRefCounts contains a mapping of PVC names to the number of pods on the node using it. - // Keys are in the format "namespace/name". - PVCRefCounts map[string]int - - // Whenever NodeInfo changes, generation is bumped. - // This is used to avoid cloning it if the object didn't change. - Generation int64 -} - -// NodeInfo implements KMetadata, so for example klog.KObjSlice(nodes) works -// when nodes is a []*NodeInfo. -var _ klog.KMetadata = &NodeInfo{} - -func (n *NodeInfo) GetName() string { - if n == nil { - return "" - } - if n.node == nil { - return "" - } - return n.node.Name -} -func (n *NodeInfo) GetNamespace() string { return "" } - -// nextGeneration: Let's make sure history never forgets the name... -// Increments the generation number monotonically ensuring that generation numbers never collide. -// Collision of the generation numbers would be particularly problematic if a node was deleted and -// added back with the same name. See issue#63262. -func nextGeneration() int64 { - return atomic.AddInt64(&generation, 1) -} - -// Resource is a collection of compute resource. -type Resource struct { - MilliCPU int64 - Memory int64 - EphemeralStorage int64 - // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) - // explicitly as int, to avoid conversions and improve performance. - AllowedPodNumber int - // ScalarResources - ScalarResources map[v1.ResourceName]int64 -} - -// NewResource creates a Resource from ResourceList -func NewResource(rl v1.ResourceList) *Resource { - r := &Resource{} - r.Add(rl) - return r -} - -// Add adds ResourceList into Resource. -func (r *Resource) Add(rl v1.ResourceList) { - if r == nil { - return - } - - for rName, rQuant := range rl { - switch rName { - case v1.ResourceCPU: - r.MilliCPU += rQuant.MilliValue() - case v1.ResourceMemory: - r.Memory += rQuant.Value() - case v1.ResourcePods: - r.AllowedPodNumber += int(rQuant.Value()) - case v1.ResourceEphemeralStorage: - r.EphemeralStorage += rQuant.Value() - default: - if schedutil.IsScalarResourceName(rName) { - r.AddScalar(rName, rQuant.Value()) - } - } - } -} - -// Clone returns a copy of this resource. 
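// Resource.Add above folds a v1.ResourceList into plain int64 counters: CPU in
// milli-units, memory and ephemeral storage in bytes. A minimal, self-contained
// sketch of that folding using only apimachinery quantities; the values are
// hypothetical.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	rl := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("128Mi"),
	}
	var milliCPU, memory int64
	for name, q := range rl {
		switch name {
		case v1.ResourceCPU:
			milliCPU += q.MilliValue() // 500m -> 500
		case v1.ResourceMemory:
			memory += q.Value() // 128Mi -> 134217728 bytes
		}
	}
	fmt.Println(milliCPU, memory)
}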
-func (r *Resource) Clone() *Resource { - res := &Resource{ - MilliCPU: r.MilliCPU, - Memory: r.Memory, - AllowedPodNumber: r.AllowedPodNumber, - EphemeralStorage: r.EphemeralStorage, - } - if r.ScalarResources != nil { - res.ScalarResources = make(map[v1.ResourceName]int64, len(r.ScalarResources)) - for k, v := range r.ScalarResources { - res.ScalarResources[k] = v - } - } - return res -} - -// AddScalar adds a resource by a scalar value of this resource. -func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) { - r.SetScalar(name, r.ScalarResources[name]+quantity) -} - -// SetScalar sets a resource by a scalar value of this resource. -func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) { - // Lazily allocate scalar resource map. - if r.ScalarResources == nil { - r.ScalarResources = map[v1.ResourceName]int64{} - } - r.ScalarResources[name] = quantity -} - -// SetMaxResource compares with ResourceList and takes max value for each Resource. -func (r *Resource) SetMaxResource(rl v1.ResourceList) { - if r == nil { - return - } - - for rName, rQuantity := range rl { - switch rName { - case v1.ResourceMemory: - r.Memory = max(r.Memory, rQuantity.Value()) - case v1.ResourceCPU: - r.MilliCPU = max(r.MilliCPU, rQuantity.MilliValue()) - case v1.ResourceEphemeralStorage: - r.EphemeralStorage = max(r.EphemeralStorage, rQuantity.Value()) - default: - if schedutil.IsScalarResourceName(rName) { - r.SetScalar(rName, max(r.ScalarResources[rName], rQuantity.Value())) - } - } - } -} - -// NewNodeInfo returns a ready to use empty NodeInfo object. -// If any pods are given in arguments, their information will be aggregated in -// the returned object. -func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { - ni := &NodeInfo{ - Requested: &Resource{}, - NonZeroRequested: &Resource{}, - Allocatable: &Resource{}, - Generation: nextGeneration(), - UsedPorts: make(HostPortInfo), - ImageStates: make(map[string]*ImageStateSummary), - PVCRefCounts: make(map[string]int), - } - for _, pod := range pods { - ni.AddPod(pod) - } - return ni -} - -// Node returns overall information about this node. -func (n *NodeInfo) Node() *v1.Node { - if n == nil { - return nil - } - return n.node -} - -// Snapshot returns a copy of this node, Except that ImageStates is copied without the Nodes field. -func (n *NodeInfo) Snapshot() *NodeInfo { - clone := &NodeInfo{ - node: n.node, - Requested: n.Requested.Clone(), - NonZeroRequested: n.NonZeroRequested.Clone(), - Allocatable: n.Allocatable.Clone(), - UsedPorts: make(HostPortInfo), - ImageStates: make(map[string]*ImageStateSummary), - PVCRefCounts: make(map[string]int), - Generation: n.Generation, - } - if len(n.Pods) > 0 { - clone.Pods = append([]*PodInfo(nil), n.Pods...) - } - if len(n.UsedPorts) > 0 { - // HostPortInfo is a map-in-map struct - // make sure it's deep copied - for ip, portMap := range n.UsedPorts { - clone.UsedPorts[ip] = make(map[ProtocolPort]struct{}) - for protocolPort, v := range portMap { - clone.UsedPorts[ip][protocolPort] = v - } - } - } - if len(n.PodsWithAffinity) > 0 { - clone.PodsWithAffinity = append([]*PodInfo(nil), n.PodsWithAffinity...) - } - if len(n.PodsWithRequiredAntiAffinity) > 0 { - clone.PodsWithRequiredAntiAffinity = append([]*PodInfo(nil), n.PodsWithRequiredAntiAffinity...) 
- } - if len(n.ImageStates) > 0 { - state := make(map[string]*ImageStateSummary, len(n.ImageStates)) - for imageName, imageState := range n.ImageStates { - state[imageName] = imageState.Snapshot() - } - clone.ImageStates = state - } - for key, value := range n.PVCRefCounts { - clone.PVCRefCounts[key] = value - } - return clone -} - -// String returns representation of human readable format of this NodeInfo. -func (n *NodeInfo) String() string { - podKeys := make([]string, len(n.Pods)) - for i, p := range n.Pods { - podKeys[i] = p.Pod.Name - } - return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}", - podKeys, n.Requested, n.NonZeroRequested, n.UsedPorts, n.Allocatable) -} - -// AddPodInfo adds pod information to this NodeInfo. -// Consider using this instead of AddPod if a PodInfo is already computed. -func (n *NodeInfo) AddPodInfo(podInfo *PodInfo) { - n.Pods = append(n.Pods, podInfo) - if podWithAffinity(podInfo.Pod) { - n.PodsWithAffinity = append(n.PodsWithAffinity, podInfo) - } - if podWithRequiredAntiAffinity(podInfo.Pod) { - n.PodsWithRequiredAntiAffinity = append(n.PodsWithRequiredAntiAffinity, podInfo) - } - n.update(podInfo, 1) -} - -// AddPod is a wrapper around AddPodInfo. -func (n *NodeInfo) AddPod(pod *v1.Pod) { - // ignore this err since apiserver doesn't properly validate affinity terms - // and we can't fix the validation for backwards compatibility. - podInfo, _ := NewPodInfo(pod) - n.AddPodInfo(podInfo) -} - -func podWithAffinity(p *v1.Pod) bool { - affinity := p.Spec.Affinity - return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil) -} - -func podWithRequiredAntiAffinity(p *v1.Pod) bool { - affinity := p.Spec.Affinity - return affinity != nil && affinity.PodAntiAffinity != nil && - len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 -} - -func removeFromSlice(logger klog.Logger, s []*PodInfo, k string) ([]*PodInfo, *PodInfo) { - var removedPod *PodInfo - for i := range s { - tmpKey, err := GetPodKey(s[i].Pod) - if err != nil { - logger.Error(err, "Cannot get pod key", "pod", klog.KObj(s[i].Pod)) - continue - } - if k == tmpKey { - removedPod = s[i] - // delete the element - s[i] = s[len(s)-1] - s = s[:len(s)-1] - break - } - } - // resets the slices to nil so that we can do DeepEqual in unit tests. - if len(s) == 0 { - return nil, removedPod - } - return s, removedPod -} - -// RemovePod subtracts pod information from this NodeInfo. -func (n *NodeInfo) RemovePod(logger klog.Logger, pod *v1.Pod) error { - k, err := GetPodKey(pod) - if err != nil { - return err - } - if podWithAffinity(pod) { - n.PodsWithAffinity, _ = removeFromSlice(logger, n.PodsWithAffinity, k) - } - if podWithRequiredAntiAffinity(pod) { - n.PodsWithRequiredAntiAffinity, _ = removeFromSlice(logger, n.PodsWithRequiredAntiAffinity, k) - } - - var removedPod *PodInfo - if n.Pods, removedPod = removeFromSlice(logger, n.Pods, k); removedPod != nil { - n.update(removedPod, -1) - return nil - } - return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name) -} - -// update node info based on the pod, and sign. -// The sign will be set to `+1` when AddPod and to `-1` when RemovePod. 
-func (n *NodeInfo) update(podInfo *PodInfo, sign int64) {
-	podResource := podInfo.calculateResource()
-	n.Requested.MilliCPU += sign * podResource.resource.MilliCPU
-	n.Requested.Memory += sign * podResource.resource.Memory
-	n.Requested.EphemeralStorage += sign * podResource.resource.EphemeralStorage
-	if n.Requested.ScalarResources == nil && len(podResource.resource.ScalarResources) > 0 {
-		n.Requested.ScalarResources = map[v1.ResourceName]int64{}
-	}
-	for rName, rQuant := range podResource.resource.ScalarResources {
-		n.Requested.ScalarResources[rName] += sign * rQuant
-	}
-	n.NonZeroRequested.MilliCPU += sign * podResource.non0CPU
-	n.NonZeroRequested.Memory += sign * podResource.non0Mem
-
-	// Consume ports when pod added or release ports when pod removed.
-	n.updateUsedPorts(podInfo.Pod, sign > 0)
-	n.updatePVCRefCounts(podInfo.Pod, sign > 0)
-
-	n.Generation = nextGeneration()
-}
-
-// getNonMissingContainerRequests returns the default non-zero CPU and memory
-// requests for a container that the scheduler uses when container-level and
-// pod-level requests are not set for a resource. It returns a ResourceList that
-// includes these default non-zero requests, which are essential for the
-// scheduler to function correctly.
-// The method's behavior depends on whether pod-level resources are set or not:
-// 1. When the pod level resources are not set, the method returns a ResourceList
-// with the following defaults:
-//   - CPU: schedutil.DefaultMilliCPURequest
-//   - Memory: schedutil.DefaultMemoryRequest
-//
-// These defaults ensure that each container has a minimum resource request,
-// allowing the scheduler to aggregate these requests and find a suitable node
-// for the pod.
-//
-// 2. When the pod level resources are set, if a CPU or memory request is
-// missing at the container-level *and* at the pod-level, the corresponding
-// default value (schedutil.DefaultMilliCPURequest or schedutil.DefaultMemoryRequest)
-// is included in the returned ResourceList.
-// Note that these default values are not set in the Pod object itself; they are only used
-// by the scheduler during node selection.
-func getNonMissingContainerRequests(requests v1.ResourceList, podLevelResourcesSet bool) v1.ResourceList {
-	if !podLevelResourcesSet {
-		return v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
-		}
-	}
-
-	nonMissingContainerRequests := make(v1.ResourceList, 2)
-	// DefaultMilliCPURequest serves as the fallback value when both
-	// pod-level and container-level CPU requests are not set.
-	// Note that the apiserver defaulting logic will propagate a non-zero
-	// container-level CPU request to the pod level if a pod-level request
-	// is not explicitly set.
-	if _, exists := requests[v1.ResourceCPU]; !exists {
-		nonMissingContainerRequests[v1.ResourceCPU] = *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI)
-	}
-
-	// DefaultMemoryRequest serves as the fallback value when both
-	// pod-level and container-level memory requests are unspecified.
-	// Note that the apiserver defaulting logic will propagate a non-zero
-	// container-level memory request to the pod level if a pod-level request
-	// is not explicitly set.
- if _, exists := requests[v1.ResourceMemory]; !exists { - nonMissingContainerRequests[v1.ResourceMemory] = *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI) - } - return nonMissingContainerRequests - -} - -func (pi *PodInfo) calculateResource() podResource { - if pi.cachedResource != nil { - return *pi.cachedResource - } - inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) - podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) - requests := resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{ - UseStatusResources: inPlacePodVerticalScalingEnabled, - // SkipPodLevelResources is set to false when PodLevelResources feature is enabled. - SkipPodLevelResources: !podLevelResourcesEnabled, - }) - isPodLevelResourcesSet := podLevelResourcesEnabled && resourcehelper.IsPodLevelRequestsSet(pi.Pod) - nonMissingContainerRequests := getNonMissingContainerRequests(requests, isPodLevelResourcesSet) - non0Requests := requests - if len(nonMissingContainerRequests) > 0 { - non0Requests = resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{ - UseStatusResources: inPlacePodVerticalScalingEnabled, - // SkipPodLevelResources is set to false when PodLevelResources feature is enabled. - SkipPodLevelResources: !podLevelResourcesEnabled, - NonMissingContainerRequests: nonMissingContainerRequests, - }) - } - non0CPU := non0Requests[v1.ResourceCPU] - non0Mem := non0Requests[v1.ResourceMemory] - - var res Resource - res.Add(requests) - podResource := podResource{ - resource: res, - non0CPU: non0CPU.MilliValue(), - non0Mem: non0Mem.Value(), - } - pi.cachedResource = &podResource - return podResource -} - -// updateUsedPorts updates the UsedPorts of NodeInfo. -func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) { - for _, container := range pod.Spec.Containers { - for _, podPort := range container.Ports { - if add { - n.UsedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) - } else { - n.UsedPorts.Remove(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) - } - } - } -} - -// updatePVCRefCounts updates the PVCRefCounts of NodeInfo. -func (n *NodeInfo) updatePVCRefCounts(pod *v1.Pod, add bool) { - for _, v := range pod.Spec.Volumes { - if v.PersistentVolumeClaim == nil { - continue - } - - key := GetNamespacedName(pod.Namespace, v.PersistentVolumeClaim.ClaimName) - if add { - n.PVCRefCounts[key] += 1 - } else { - n.PVCRefCounts[key] -= 1 - if n.PVCRefCounts[key] <= 0 { - delete(n.PVCRefCounts, key) - } - } - } -} - -// SetNode sets the overall node information. -func (n *NodeInfo) SetNode(node *v1.Node) { - n.node = node - n.Allocatable = NewResource(node.Status.Allocatable) - n.Generation = nextGeneration() -} - -// RemoveNode removes the node object, leaving all other tracking information. -func (n *NodeInfo) RemoveNode() { - n.node = nil - n.Generation = nextGeneration() -} - -// GetPodKey returns the string key of a pod. -func GetPodKey(pod *v1.Pod) (string, error) { - uid := string(pod.UID) - if len(uid) == 0 { - return "", errors.New("cannot get cache key for pod with empty UID") - } - return uid, nil -} - -// GetNamespacedName returns the string format of a namespaced resource name. -func GetNamespacedName(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - -// DefaultBindAllHostIP defines the default ip address used to bind to all host. 
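// The fallback above exists so that zero-request ("best effort") pods still
// carry a small scheduling footprint. A dependency-free sketch of the effect,
// assuming the upstream schedutil defaults of 100 milli-CPU and 200MB of
// memory (values quoted from pkg/scheduler/util at the time of this patch).
package main

import "fmt"

const (
	defaultMilliCPURequest int64 = 100               // 0.1 core
	defaultMemoryRequest   int64 = 200 * 1024 * 1024 // 200 MB
)

// nonZero mirrors how NonZeroRequested is accumulated: a missing request is
// replaced by the default so many zero-request pods cannot pile onto one node.
func nonZero(requested, def int64) int64 {
	if requested == 0 {
		return def
	}
	return requested
}

func main() {
	fmt.Println(nonZero(0, defaultMilliCPURequest))   // 100
	fmt.Println(nonZero(250, defaultMilliCPURequest)) // 250
	fmt.Println(nonZero(0, defaultMemoryRequest))     // 209715200
}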
-const DefaultBindAllHostIP = "0.0.0.0" - -// ProtocolPort represents a protocol port pair, e.g. tcp:80. -type ProtocolPort struct { - Protocol string - Port int32 -} - -// NewProtocolPort creates a ProtocolPort instance. -func NewProtocolPort(protocol string, port int32) *ProtocolPort { - pp := &ProtocolPort{ - Protocol: protocol, - Port: port, - } - - if len(pp.Protocol) == 0 { - pp.Protocol = string(v1.ProtocolTCP) - } - - return pp -} - -// HostPortInfo stores mapping from ip to a set of ProtocolPort -type HostPortInfo map[string]map[ProtocolPort]struct{} - -// Add adds (ip, protocol, port) to HostPortInfo -func (h HostPortInfo) Add(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if _, ok := h[ip]; !ok { - h[ip] = map[ProtocolPort]struct{}{ - *pp: {}, - } - return - } - - h[ip][*pp] = struct{}{} -} - -// Remove removes (ip, protocol, port) from HostPortInfo -func (h HostPortInfo) Remove(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if m, ok := h[ip]; ok { - delete(m, *pp) - if len(h[ip]) == 0 { - delete(h, ip) - } - } -} - -// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo -func (h HostPortInfo) Len() int { - length := 0 - for _, m := range h { - length += len(m) - } - return length -} - -// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing -// ones in HostPortInfo. -func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool { - if port <= 0 { - return false - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - - // If ip is 0.0.0.0 check all IP's (protocol, port) pair - if ip == DefaultBindAllHostIP { - for _, m := range h { - if _, ok := m[*pp]; ok { - return true - } - } - return false - } - - // If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair - for _, key := range []string{DefaultBindAllHostIP, ip} { - if m, ok := h[key]; ok { - if _, ok2 := m[*pp]; ok2 { - return true - } - } - } - - return false -} - -// sanitize the parameters -func (h HostPortInfo) sanitize(ip, protocol *string) { - if len(*ip) == 0 { - *ip = DefaultBindAllHostIP - } - if len(*protocol) == 0 { - *protocol = string(v1.ProtocolTCP) - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metric_recorder.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metric_recorder.go deleted file mode 100644 index f59ea1e96f7..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metric_recorder.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package metrics - -import ( - "time" - - "k8s.io/component-base/metrics" -) - -// MetricRecorder represents a metric recorder which takes action when the -// metric Inc(), Dec() and Clear() -type MetricRecorder interface { - Inc() - Dec() - Clear() -} - -var _ MetricRecorder = &PendingPodsRecorder{} - -// PendingPodsRecorder is an implementation of MetricRecorder -type PendingPodsRecorder struct { - recorder metrics.GaugeMetric -} - -// NewActivePodsRecorder returns ActivePods in a Prometheus metric fashion -func NewActivePodsRecorder() *PendingPodsRecorder { - return &PendingPodsRecorder{ - recorder: ActivePods(), - } -} - -// NewUnschedulablePodsRecorder returns UnschedulablePods in a Prometheus metric fashion -func NewUnschedulablePodsRecorder() *PendingPodsRecorder { - return &PendingPodsRecorder{ - recorder: UnschedulablePods(), - } -} - -// NewBackoffPodsRecorder returns BackoffPods in a Prometheus metric fashion -func NewBackoffPodsRecorder() *PendingPodsRecorder { - return &PendingPodsRecorder{ - recorder: BackoffPods(), - } -} - -// NewGatedPodsRecorder returns GatedPods in a Prometheus metric fashion -func NewGatedPodsRecorder() *PendingPodsRecorder { - return &PendingPodsRecorder{ - recorder: GatedPods(), - } -} - -// Inc increases a metric counter by 1, in an atomic way -func (r *PendingPodsRecorder) Inc() { - r.recorder.Inc() -} - -// Dec decreases a metric counter by 1, in an atomic way -func (r *PendingPodsRecorder) Dec() { - r.recorder.Dec() -} - -// Clear set a metric counter to 0, in an atomic way -func (r *PendingPodsRecorder) Clear() { - r.recorder.Set(float64(0)) -} - -// histogramVecMetric is the data structure passed in the buffer channel between the main framework thread -// and the metricsRecorder goroutine. -type histogramVecMetric struct { - metric *metrics.HistogramVec - labelValues []string - value float64 -} - -type gaugeVecMetric struct { - metric *metrics.GaugeVec - labelValues []string - valueToAdd float64 -} - -type gaugeVecMetricKey struct { - metricName string - labelValue string -} - -// MetricAsyncRecorder records metric in a separate goroutine to avoid overhead in the critical path. -type MetricAsyncRecorder struct { - // bufferCh is a channel that serves as a metrics buffer before the metricsRecorder goroutine reports it. - bufferCh chan *histogramVecMetric - // if bufferSize is reached, incoming metrics will be discarded. - bufferSize int - // how often the recorder runs to flush the metrics. - interval time.Duration - - // aggregatedInflightEventMetric is only to record InFlightEvents metric asynchronously. - // It's a map from gaugeVecMetricKey to the aggregated value - // and the aggregated value is flushed to Prometheus every time the interval is reached. - // Note that we don't lock the map deliberately because we assume the queue takes lock before updating the in-flight events. - aggregatedInflightEventMetric map[gaugeVecMetricKey]int - aggregatedInflightEventMetricLastFlushTime time.Time - aggregatedInflightEventMetricBufferCh chan *gaugeVecMetric - - // stopCh is used to stop the goroutine which periodically flushes metrics. - stopCh <-chan struct{} - // IsStoppedCh indicates whether the goroutine is stopped. It's used in tests only to make sure - // the metric flushing goroutine is stopped so that tests can collect metrics for verification. 
- IsStoppedCh chan struct{} -} - -func NewMetricsAsyncRecorder(bufferSize int, interval time.Duration, stopCh <-chan struct{}) *MetricAsyncRecorder { - recorder := &MetricAsyncRecorder{ - bufferCh: make(chan *histogramVecMetric, bufferSize), - bufferSize: bufferSize, - interval: interval, - stopCh: stopCh, - aggregatedInflightEventMetric: make(map[gaugeVecMetricKey]int), - aggregatedInflightEventMetricLastFlushTime: time.Now(), - aggregatedInflightEventMetricBufferCh: make(chan *gaugeVecMetric, bufferSize), - IsStoppedCh: make(chan struct{}), - } - go recorder.run() - return recorder -} - -// ObservePluginDurationAsync observes the plugin_execution_duration_seconds metric. -// The metric will be flushed to Prometheus asynchronously. -func (r *MetricAsyncRecorder) ObservePluginDurationAsync(extensionPoint, pluginName, status string, value float64) { - r.observeMetricAsync(PluginExecutionDuration, value, pluginName, extensionPoint, status) -} - -// ObserveQueueingHintDurationAsync observes the queueing_hint_execution_duration_seconds metric. -// The metric will be flushed to Prometheus asynchronously. -func (r *MetricAsyncRecorder) ObserveQueueingHintDurationAsync(pluginName, event, hint string, value float64) { - r.observeMetricAsync(queueingHintExecutionDuration, value, pluginName, event, hint) -} - -// ObserveInFlightEventsAsync observes the in_flight_events metric. -// -// Note that this function is not goroutine-safe; -// we don't lock the map deliberately for the performance reason and we assume the queue (i.e., the caller) takes lock before updating the in-flight events. -func (r *MetricAsyncRecorder) ObserveInFlightEventsAsync(eventLabel string, valueToAdd float64, forceFlush bool) { - r.aggregatedInflightEventMetric[gaugeVecMetricKey{metricName: InFlightEvents.Name, labelValue: eventLabel}] += int(valueToAdd) - - // Only flush the metric to the channel if the interval is reached. - // The values are flushed to Prometheus in the run() function, which runs once the interval time. - // Note: we implement this flushing here, not in FlushMetrics, because, if we did so, we would need to implement a lock for the map, which we want to avoid. - if forceFlush || time.Since(r.aggregatedInflightEventMetricLastFlushTime) > r.interval { - for key, value := range r.aggregatedInflightEventMetric { - newMetric := &gaugeVecMetric{ - metric: InFlightEvents, - labelValues: []string{key.labelValue}, - valueToAdd: float64(value), - } - select { - case r.aggregatedInflightEventMetricBufferCh <- newMetric: - default: - } - } - r.aggregatedInflightEventMetricLastFlushTime = time.Now() - // reset - r.aggregatedInflightEventMetric = make(map[gaugeVecMetricKey]int) - } -} - -func (r *MetricAsyncRecorder) observeMetricAsync(m *metrics.HistogramVec, value float64, labelsValues ...string) { - newMetric := &histogramVecMetric{ - metric: m, - labelValues: labelsValues, - value: value, - } - select { - case r.bufferCh <- newMetric: - default: - } -} - -// run flushes buffered metrics into Prometheus every second. -func (r *MetricAsyncRecorder) run() { - for { - select { - case <-r.stopCh: - close(r.IsStoppedCh) - return - default: - } - r.FlushMetrics() - time.Sleep(r.interval) - } -} - -// FlushMetrics tries to clean up the bufferCh by reading at most bufferSize metrics. 
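// The recorder above keeps metric writes off the scheduling hot path: sends go
// through a buffered channel guarded by select/default, so a full buffer drops
// the sample instead of blocking the caller. A dependency-free sketch of that
// pattern; all names here are illustrative, not the framework's.
package main

import "fmt"

func main() {
	buffer := make(chan int, 2) // plays the role of bufferCh with bufferSize 2

	send := func(v int) bool {
		select {
		case buffer <- v:
			return true
		default: // buffer full: drop rather than block the caller
			return false
		}
	}

	for i := 0; i < 4; i++ {
		fmt.Println(i, send(i)) // 0 true, 1 true, 2 false, 3 false
	}

	// Draining reads at most cap(buffer) samples, again without blocking,
	// which is the shape of the FlushMetrics loop below.
	for i := 0; i < cap(buffer); i++ {
		select {
		case v := <-buffer:
			fmt.Println("flushed", v)
		default:
		}
	}
}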
-func (r *MetricAsyncRecorder) FlushMetrics() { - for i := 0; i < r.bufferSize; i++ { - select { - case m := <-r.bufferCh: - m.metric.WithLabelValues(m.labelValues...).Observe(m.value) - default: - // no more value - } - - select { - case m := <-r.aggregatedInflightEventMetricBufferCh: - m.metric.WithLabelValues(m.labelValues...).Add(m.valueToAdd) - default: - // no more value - } - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go deleted file mode 100644 index 3d0edacd954..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go +++ /dev/null @@ -1,411 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "sync" - "time" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/component-base/metrics" - "k8s.io/component-base/metrics/legacyregistry" - "k8s.io/kubernetes/pkg/features" - volumebindingmetrics "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics" -) - -const ( - // SchedulerSubsystem - subsystem name used by scheduler. - SchedulerSubsystem = "scheduler" -) - -// Below are possible values for the work and operation label. -const ( - // PrioritizingExtender - prioritizing extender work/operation label value. - PrioritizingExtender = "prioritizing_extender" - // Binding - binding work/operation label value. - Binding = "binding" -) - -const ( - GoroutineResultSuccess = "success" - GoroutineResultError = "error" -) - -// ExtentionPoints is a list of possible values for the extension_point label. -var ExtentionPoints = []string{ - PreFilter, - Filter, - PreFilterExtensionAddPod, - PreFilterExtensionRemovePod, - PostFilter, - PreScore, - Score, - ScoreExtensionNormalize, - PreBind, - Bind, - PostBind, - Reserve, - Unreserve, - Permit, -} - -const ( - PreFilter = "PreFilter" - Filter = "Filter" - PreFilterExtensionAddPod = "PreFilterExtensionAddPod" - PreFilterExtensionRemovePod = "PreFilterExtensionRemovePod" - PostFilter = "PostFilter" - PreScore = "PreScore" - Score = "Score" - ScoreExtensionNormalize = "ScoreExtensionNormalize" - PreBind = "PreBind" - Bind = "Bind" - PostBind = "PostBind" - Reserve = "Reserve" - Unreserve = "Unreserve" - Permit = "Permit" -) - -const ( - QueueingHintResultQueue = "Queue" - QueueingHintResultQueueSkip = "QueueSkip" - QueueingHintResultError = "Error" -) - -const ( - PodPoppedInFlightEvent = "PodPopped" -) - -// All the histogram based metrics have 1ms as size for the smallest bucket. 
-var ( - scheduleAttempts *metrics.CounterVec - EventHandlingLatency *metrics.HistogramVec - schedulingLatency *metrics.HistogramVec - SchedulingAlgorithmLatency *metrics.Histogram - PreemptionVictims *metrics.Histogram - PreemptionAttempts *metrics.Counter - pendingPods *metrics.GaugeVec - InFlightEvents *metrics.GaugeVec - Goroutines *metrics.GaugeVec - - PodSchedulingSLIDuration *metrics.HistogramVec - PodSchedulingAttempts *metrics.Histogram - FrameworkExtensionPointDuration *metrics.HistogramVec - PluginExecutionDuration *metrics.HistogramVec - - PermitWaitDuration *metrics.HistogramVec - CacheSize *metrics.GaugeVec - // Deprecated: SchedulerCacheSize is deprecated, - // and will be removed at v1.34. Please use CacheSize instead. - SchedulerCacheSize *metrics.GaugeVec - unschedulableReasons *metrics.GaugeVec - PluginEvaluationTotal *metrics.CounterVec - - // The below two are only available when the QHint feature gate is enabled. - queueingHintExecutionDuration *metrics.HistogramVec - SchedulerQueueIncomingPods *metrics.CounterVec - - // The below two are only available when the async-preemption feature gate is enabled. - PreemptionGoroutinesDuration *metrics.HistogramVec - PreemptionGoroutinesExecutionTotal *metrics.CounterVec - - // metricsList is a list of all metrics that should be registered always, regardless of any feature gate's value. - metricsList []metrics.Registerable -) - -var registerMetrics sync.Once - -// Register all metrics. -func Register() { - // Register the metrics. - registerMetrics.Do(func() { - InitMetrics() - RegisterMetrics(metricsList...) - volumebindingmetrics.RegisterVolumeSchedulingMetrics() - - if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) { - RegisterMetrics(queueingHintExecutionDuration, InFlightEvents) - } - if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption) { - RegisterMetrics(PreemptionGoroutinesDuration, PreemptionGoroutinesExecutionTotal) - } - }) -} - -func InitMetrics() { - scheduleAttempts = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: SchedulerSubsystem, - Name: "schedule_attempts_total", - Help: "Number of attempts to schedule pods, by the result. 
'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem.", - StabilityLevel: metrics.STABLE, - }, []string{"result", "profile"}) - - EventHandlingLatency = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "event_handling_duration_seconds", - Help: "Event handling latency in seconds.", - // Start with 0.1ms with the last bucket being [~200ms, Inf) - Buckets: metrics.ExponentialBuckets(0.0001, 2, 12), - StabilityLevel: metrics.ALPHA, - }, []string{"event"}) - - schedulingLatency = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "scheduling_attempt_duration_seconds", - Help: "Scheduling attempt latency in seconds (scheduling algorithm + binding)", - Buckets: metrics.ExponentialBuckets(0.001, 2, 15), - StabilityLevel: metrics.STABLE, - }, []string{"result", "profile"}) - SchedulingAlgorithmLatency = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "scheduling_algorithm_duration_seconds", - Help: "Scheduling algorithm latency in seconds", - Buckets: metrics.ExponentialBuckets(0.001, 2, 15), - StabilityLevel: metrics.ALPHA, - }, - ) - PreemptionVictims = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "preemption_victims", - Help: "Number of selected preemption victims", - // we think #victims>64 is pretty rare, therefore [64, +Inf) is considered a single bucket. - Buckets: metrics.ExponentialBuckets(1, 2, 7), - StabilityLevel: metrics.STABLE, - }) - PreemptionAttempts = metrics.NewCounter( - &metrics.CounterOpts{ - Subsystem: SchedulerSubsystem, - Name: "preemption_attempts_total", - Help: "Total preemption attempts in the cluster till now", - StabilityLevel: metrics.STABLE, - }) - pendingPods = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - Name: "pending_pods", - Help: "Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulablePods that the scheduler attempted to schedule and failed; 'gated' is the number of unschedulable pods that the scheduler never attempted to schedule because they are gated.", - StabilityLevel: metrics.STABLE, - }, []string{"queue"}) - InFlightEvents = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - Name: "inflight_events", - Help: "Number of events currently tracked in the scheduling queue.", - StabilityLevel: metrics.ALPHA, - }, []string{"event"}) - Goroutines = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - Name: "goroutines", - Help: "Number of running goroutines split by the work they do such as binding.", - StabilityLevel: metrics.ALPHA, - }, []string{"operation"}) - - PodSchedulingSLIDuration = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "pod_scheduling_sli_duration_seconds", - Help: "E2e latency for a pod being scheduled, from the time the pod enters the scheduling queue and might involve multiple scheduling attempts.", - // Start with 10ms with the last bucket being [~88m, Inf). 
- Buckets: metrics.ExponentialBuckets(0.01, 2, 20), - StabilityLevel: metrics.BETA, - }, - []string{"attempts"}) - - PodSchedulingAttempts = metrics.NewHistogram( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "pod_scheduling_attempts", - Help: "Number of attempts to successfully schedule a pod.", - Buckets: metrics.ExponentialBuckets(1, 2, 5), - StabilityLevel: metrics.STABLE, - }) - - FrameworkExtensionPointDuration = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "framework_extension_point_duration_seconds", - Help: "Latency for running all plugins of a specific extension point.", - // Start with 0.1ms with the last bucket being [~200ms, Inf) - Buckets: metrics.ExponentialBuckets(0.0001, 2, 12), - StabilityLevel: metrics.STABLE, - }, - []string{"extension_point", "status", "profile"}) - - PluginExecutionDuration = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "plugin_execution_duration_seconds", - Help: "Duration for running a plugin at a specific extension point.", - // Start with 0.01ms with the last bucket being [~22ms, Inf). We use a small factor (1.5) - // so that we have better granularity since plugin latency is very sensitive. - Buckets: metrics.ExponentialBuckets(0.00001, 1.5, 20), - StabilityLevel: metrics.ALPHA, - }, - []string{"plugin", "extension_point", "status"}) - - // This is only available when the QHint feature gate is enabled. - queueingHintExecutionDuration = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "queueing_hint_execution_duration_seconds", - Help: "Duration for running a queueing hint function of a plugin.", - // Start with 0.01ms with the last bucket being [~22ms, Inf). We use a small factor (1.5) - // so that we have better granularity since plugin latency is very sensitive. - Buckets: metrics.ExponentialBuckets(0.00001, 1.5, 20), - StabilityLevel: metrics.ALPHA, - }, - []string{"plugin", "event", "hint"}) - - SchedulerQueueIncomingPods = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: SchedulerSubsystem, - Name: "queue_incoming_pods_total", - Help: "Number of pods added to scheduling queues by event and queue type.", - StabilityLevel: metrics.STABLE, - }, []string{"queue", "event"}) - - PermitWaitDuration = metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Subsystem: SchedulerSubsystem, - Name: "permit_wait_duration_seconds", - Help: "Duration of waiting on permit.", - Buckets: metrics.ExponentialBuckets(0.001, 2, 15), - StabilityLevel: metrics.ALPHA, - }, - []string{"result"}) - - SchedulerCacheSize = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - Name: "scheduler_cache_size", - Help: "Number of nodes, pods, and assumed (bound) pods in the scheduler cache.", - StabilityLevel: metrics.ALPHA, - DeprecatedVersion: "1.33.0", - }, []string{"type"}) - - CacheSize = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - Name: "cache_size", - Help: "Number of nodes, pods, and assumed (bound) pods in the scheduler cache.", - StabilityLevel: metrics.ALPHA, - }, []string{"type"}) - - unschedulableReasons = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - Name: "unschedulable_pods", - Help: "The number of unschedulable pods broken down by plugin name. 
A pod will increment the gauge for all plugins that caused it to not schedule and so this metric has meaning only when broken down by plugin.",
-			StabilityLevel: metrics.ALPHA,
-		}, []string{"plugin", "profile"})
-
-	PluginEvaluationTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      SchedulerSubsystem,
-			Name:           "plugin_evaluation_total",
-			Help:           "Number of attempts to schedule pods by each plugin and the extension point (available only in PreFilter, Filter, PreScore, and Score).",
-			StabilityLevel: metrics.ALPHA,
-		}, []string{"plugin", "extension_point", "profile"})
-
-	PreemptionGoroutinesDuration = metrics.NewHistogramVec(
-		&metrics.HistogramOpts{
-			Subsystem:      SchedulerSubsystem,
-			Name:           "preemption_goroutines_duration_seconds",
-			Help:           "Duration in seconds for running goroutines for the preemption.",
-			Buckets:        metrics.ExponentialBuckets(0.01, 2, 20),
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"result"})
-
-	PreemptionGoroutinesExecutionTotal = metrics.NewCounterVec(
-		&metrics.CounterOpts{
-			Subsystem:      SchedulerSubsystem,
-			Name:           "preemption_goroutines_execution_total",
-			Help:           "Number of preemption goroutines executed.",
-			StabilityLevel: metrics.ALPHA,
-		},
-		[]string{"result"})
-
-	metricsList = []metrics.Registerable{
-		scheduleAttempts,
-		schedulingLatency,
-		SchedulingAlgorithmLatency,
-		EventHandlingLatency,
-		PreemptionVictims,
-		PreemptionAttempts,
-		pendingPods,
-		PodSchedulingSLIDuration,
-		PodSchedulingAttempts,
-		FrameworkExtensionPointDuration,
-		PluginExecutionDuration,
-		SchedulerQueueIncomingPods,
-		Goroutines,
-		PermitWaitDuration,
-		CacheSize,
-		SchedulerCacheSize,
-		unschedulableReasons,
-		PluginEvaluationTotal,
-	}
-}
-
-// RegisterMetrics registers a list of metrics.
-// This function is exported because it is intended to be used by out-of-tree plugins to register their custom metrics.
-func RegisterMetrics(extraMetrics ...metrics.Registerable) {
-	for _, metric := range extraMetrics {
-		legacyregistry.MustRegister(metric)
-	}
-}
-
-// GetGather returns the gatherer. It is used by test cases outside the current package.
-func GetGather() metrics.Gatherer {
-	return legacyregistry.DefaultGatherer
-}
-
-// ActivePods returns the pending pods metrics with the label active.
-func ActivePods() metrics.GaugeMetric {
-	return pendingPods.With(metrics.Labels{"queue": "active"})
-}
-
-// BackoffPods returns the pending pods metrics with the label backoff.
-func BackoffPods() metrics.GaugeMetric {
-	return pendingPods.With(metrics.Labels{"queue": "backoff"})
-}
-
-// UnschedulablePods returns the pending pods metrics with the label unschedulable.
-func UnschedulablePods() metrics.GaugeMetric {
-	return pendingPods.With(metrics.Labels{"queue": "unschedulable"})
-}
-
-// GatedPods returns the pending pods metrics with the label gated.
-func GatedPods() metrics.GaugeMetric {
-	return pendingPods.With(metrics.Labels{"queue": "gated"})
-}
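// Every histogram above derives its buckets from
// metrics.ExponentialBuckets(start, factor, count). A dependency-free sketch of
// the boundaries that call produces, so bucket comments like "the last bucket
// being [~200ms, Inf)" can be checked directly; the helper here is ours, not
// component-base's.
package main

import "fmt"

func exponentialBuckets(start, factor float64, count int) []float64 {
	buckets := make([]float64, count)
	for i := range buckets {
		buckets[i] = start
		start *= factor
	}
	return buckets
}

func main() {
	// The scheduling_attempt_duration_seconds configuration: 0.001s .. 16.384s.
	fmt.Println(exponentialBuckets(0.001, 2, 15))
}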
-
-// SinceInSeconds gets the time since the specified start in seconds.
-func SinceInSeconds(start time.Time) float64 {
-	return time.Since(start).Seconds()
-}
-
-func UnschedulableReason(plugin string, profile string) metrics.GaugeMetric {
-	return unschedulableReasons.With(metrics.Labels{"plugin": plugin, "profile": profile})
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/profile_metrics.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/profile_metrics.go
deleted file mode 100644
index d8e36e9df0a..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/profile_metrics.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package metrics
-
-// This file contains helpers for metrics that are associated with a profile.
-
-var (
-	ScheduledResult     = "scheduled"
-	UnschedulableResult = "unschedulable"
-	ErrorResult         = "error"
-)
-
-// PodScheduled records a successful scheduling attempt and the duration
-// since `start`.
-func PodScheduled(profile string, duration float64) {
-	observeScheduleAttemptAndLatency(ScheduledResult, profile, duration)
-}
-
-// PodUnschedulable records a scheduling attempt for an unschedulable pod
-// and the duration since `start`.
-func PodUnschedulable(profile string, duration float64) {
-	observeScheduleAttemptAndLatency(UnschedulableResult, profile, duration)
-}
-
-// PodScheduleError records a scheduling attempt that had an error and the
-// duration since `start`.
-func PodScheduleError(profile string, duration float64) {
-	observeScheduleAttemptAndLatency(ErrorResult, profile, duration)
-}
-
-func observeScheduleAttemptAndLatency(result, profile string, duration float64) {
-	schedulingLatency.WithLabelValues(result, profile).Observe(duration)
-	scheduleAttempts.WithLabelValues(result, profile).Inc()
-}
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/profile/profile.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/profile/profile.go
deleted file mode 100644
index 9cd510d35ee..00000000000
--- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/profile/profile.go
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package profile holds the definition of a scheduling Profile.
-package profile - -import ( - "context" - "errors" - "fmt" - - "github.com/google/go-cmp/cmp" //nolint:depguard - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/events" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/framework" - frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime" -) - -// RecorderFactory builds an EventRecorder for a given scheduler name. -type RecorderFactory func(string) events.EventRecorder - -// newProfile builds a Profile for the given configuration. -func newProfile(ctx context.Context, cfg config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory, - opts ...frameworkruntime.Option) (framework.Framework, error) { - recorder := recorderFact(cfg.SchedulerName) - opts = append(opts, frameworkruntime.WithEventRecorder(recorder)) - return frameworkruntime.NewFramework(ctx, r, &cfg, opts...) -} - -// Map holds frameworks indexed by scheduler name. -type Map map[string]framework.Framework - -// NewMap builds the frameworks given by the configuration, indexed by name. -func NewMap(ctx context.Context, cfgs []config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory, - opts ...frameworkruntime.Option) (Map, error) { - m := make(Map) - v := cfgValidator{m: m} - - for _, cfg := range cfgs { - p, err := newProfile(ctx, cfg, r, recorderFact, opts...) - if err != nil { - return nil, fmt.Errorf("creating profile for scheduler name %s: %v", cfg.SchedulerName, err) - } - if err := v.validate(cfg, p); err != nil { - return nil, err - } - m[cfg.SchedulerName] = p - } - return m, nil -} - -// HandlesSchedulerName returns whether a profile handles the given scheduler name. -func (m Map) HandlesSchedulerName(name string) bool { - _, ok := m[name] - return ok -} - -// Close closes all frameworks registered in this map. -func (m Map) Close() error { - var errs []error - for name, f := range m { - err := f.Close() - if err != nil { - errs = append(errs, fmt.Errorf("framework %s failed to close: %w", name, err)) - } - } - return errors.Join(errs...) -} - -// NewRecorderFactory returns a RecorderFactory for the broadcaster. 
-func NewRecorderFactory(b events.EventBroadcaster) RecorderFactory { - return func(name string) events.EventRecorder { - return b.NewRecorder(scheme.Scheme, name) - } -} - -type cfgValidator struct { - m Map - queueSort string - queueSortArgs runtime.Object -} - -func (v *cfgValidator) validate(cfg config.KubeSchedulerProfile, f framework.Framework) error { - if len(f.ProfileName()) == 0 { - return errors.New("scheduler name is needed") - } - if cfg.Plugins == nil { - return fmt.Errorf("plugins required for profile with scheduler name %q", f.ProfileName()) - } - if v.m[f.ProfileName()] != nil { - return fmt.Errorf("duplicate profile with scheduler name %q", f.ProfileName()) - } - - queueSort := f.ListPlugins().QueueSort.Enabled[0].Name - var queueSortArgs runtime.Object - for _, plCfg := range cfg.PluginConfig { - if plCfg.Name == queueSort { - queueSortArgs = plCfg.Args - break - } - } - if len(v.queueSort) == 0 { - v.queueSort = queueSort - v.queueSortArgs = queueSortArgs - return nil - } - if v.queueSort != queueSort { - return fmt.Errorf("different queue sort plugins for profile %q: %q, first: %q", cfg.SchedulerName, queueSort, v.queueSort) - } - if !cmp.Equal(v.queueSortArgs, queueSortArgs) { - return fmt.Errorf("different queue sort plugin args for profile %q", cfg.SchedulerName) - } - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go deleted file mode 100644 index 35b78492255..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go +++ /dev/null @@ -1,1123 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduler - -import ( - "container/heap" - "context" - "errors" - "fmt" - "math/rand" - "strconv" - "sync" - "sync/atomic" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - extenderv1 "k8s.io/kube-scheduler/extender/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" - "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/kubernetes/pkg/scheduler/util" - utiltrace "k8s.io/utils/trace" -) - -const ( - // Percentage of plugin metrics to be sampled. - pluginMetricsSamplePercent = 10 - // minFeasibleNodesToFind is the minimum number of nodes that would be scored - // in each scheduling cycle. This is a semi-arbitrary value to ensure that a - // certain minimum of nodes are checked for feasibility. This in turn helps - // ensure a minimum level of spreading. - minFeasibleNodesToFind = 100 - // minFeasibleNodesPercentageToFind is the minimum percentage of nodes that - // would be scored in each scheduling cycle. 
This is a semi-arbitrary value - // to ensure that a certain minimum of nodes are checked for feasibility. - // This in turn helps ensure a minimum level of spreading. - minFeasibleNodesPercentageToFind = 5 - // numberOfHighestScoredNodesToReport is the number of node scores - // to be included in ScheduleResult. - numberOfHighestScoredNodesToReport = 3 -) - -// ScheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting. -func (sched *Scheduler) ScheduleOne(ctx context.Context) { - logger := klog.FromContext(ctx) - podInfo, err := sched.NextPod(logger) - if err != nil { - logger.Error(err, "Error while retrieving next pod from scheduling queue") - return - } - // pod could be nil when schedulerQueue is closed - if podInfo == nil || podInfo.Pod == nil { - return - } - - pod := podInfo.Pod - // TODO(knelasevero): Remove duplicated keys from log entry calls - // When contextualized logging hits GA - // https://github.com/kubernetes/kubernetes/issues/111672 - logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod)) - ctx = klog.NewContext(ctx, logger) - logger.V(4).Info("About to try and schedule pod", "pod", klog.KObj(pod)) - - fwk, err := sched.frameworkForPod(pod) - if err != nil { - // This shouldn't happen, because we only accept for scheduling the pods - // which specify a scheduler name that matches one of the profiles. - logger.Error(err, "Error occurred") - sched.SchedulingQueue.Done(pod.UID) - return - } - if sched.skipPodSchedule(ctx, fwk, pod) { - // We don't put this Pod back to the queue, but we have to cleanup the in-flight pods/events. - sched.SchedulingQueue.Done(pod.UID) - return - } - - logger.V(3).Info("Attempting to schedule pod", "pod", klog.KObj(pod)) - - // Synchronously attempt to find a fit for the pod. - start := time.Now() - state := framework.NewCycleState() - state.SetRecordPluginMetrics(rand.Intn(100) < pluginMetricsSamplePercent) - - // Initialize an empty podsToActivate struct, which will be filled up by plugins or stay empty. - podsToActivate := framework.NewPodsToActivate() - state.Write(framework.PodsToActivateKey, podsToActivate) - - schedulingCycleCtx, cancel := context.WithCancel(ctx) - defer cancel() - - scheduleResult, assumedPodInfo, status := sched.schedulingCycle(schedulingCycleCtx, state, fwk, podInfo, start, podsToActivate) - if !status.IsSuccess() { - sched.FailureHandler(schedulingCycleCtx, fwk, assumedPodInfo, status, scheduleResult.nominatingInfo, start) - return - } - - // bind the pod to its host asynchronously (we can do this b/c of the assumption step above). - go func() { - bindingCycleCtx, cancel := context.WithCancel(ctx) - defer cancel() - - metrics.Goroutines.WithLabelValues(metrics.Binding).Inc() - defer metrics.Goroutines.WithLabelValues(metrics.Binding).Dec() - - status := sched.bindingCycle(bindingCycleCtx, state, fwk, scheduleResult, assumedPodInfo, start, podsToActivate) - if !status.IsSuccess() { - sched.handleBindingCycleError(bindingCycleCtx, state, fwk, assumedPodInfo, start, scheduleResult, status) - return - } - }() -} - -var clearNominatedNode = &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: ""} - -// schedulingCycle tries to schedule a single Pod. 
-func (sched *Scheduler) schedulingCycle( - ctx context.Context, - state *framework.CycleState, - fwk framework.Framework, - podInfo *framework.QueuedPodInfo, - start time.Time, - podsToActivate *framework.PodsToActivate, -) (ScheduleResult, *framework.QueuedPodInfo, *framework.Status) { - logger := klog.FromContext(ctx) - pod := podInfo.Pod - scheduleResult, err := sched.SchedulePod(ctx, fwk, state, pod) - if err != nil { - defer func() { - metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInSeconds(start)) - }() - if err == ErrNoNodesAvailable { - status := framework.NewStatus(framework.UnschedulableAndUnresolvable).WithError(err) - return ScheduleResult{nominatingInfo: clearNominatedNode}, podInfo, status - } - - fitError, ok := err.(*framework.FitError) - if !ok { - logger.Error(err, "Error selecting node for pod", "pod", klog.KObj(pod)) - return ScheduleResult{nominatingInfo: clearNominatedNode}, podInfo, framework.AsStatus(err) - } - - // SchedulePod() may have failed because the pod would not fit on any host, so we try to - // preempt, with the expectation that the next time the pod is tried for scheduling it - // will fit due to the preemption. It is also possible that a different pod will schedule - // into the resources that were preempted, but this is harmless. - - if !fwk.HasPostFilterPlugins() { - logger.V(3).Info("No PostFilter plugins are registered, so no preemption will be performed") - return ScheduleResult{}, podInfo, framework.NewStatus(framework.Unschedulable).WithError(err) - } - - // Run PostFilter plugins to attempt to make the pod schedulable in a future scheduling cycle. - result, status := fwk.RunPostFilterPlugins(ctx, state, pod, fitError.Diagnosis.NodeToStatus) - msg := status.Message() - fitError.Diagnosis.PostFilterMsg = msg - if status.Code() == framework.Error { - logger.Error(nil, "Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) - } else { - logger.V(5).Info("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) - } - - var nominatingInfo *framework.NominatingInfo - if result != nil { - nominatingInfo = result.NominatingInfo - } - return ScheduleResult{nominatingInfo: nominatingInfo}, podInfo, framework.NewStatus(framework.Unschedulable).WithError(err) - } - - metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInSeconds(start)) - // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. - // This allows us to keep scheduling without waiting on binding to occur. - assumedPodInfo := podInfo.DeepCopy() - assumedPod := assumedPodInfo.Pod - // assume modifies `assumedPod` by setting NodeName=scheduleResult.SuggestedHost - err = sched.assume(logger, assumedPod, scheduleResult.SuggestedHost) - if err != nil { - // This is most probably result of a BUG in retrying logic. - // We report an error here so that pod scheduling can be retried. - // This relies on the fact that Error will check if the pod has been bound - // to a node and if so will not add it back to the unscheduled pods queue - // (otherwise this would cause an infinite loop). - return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, framework.AsStatus(err) - } - - // Run the Reserve method of reserve plugins. 
- if sts := fwk.RunReservePluginsReserve(ctx, state, assumedPod, scheduleResult.SuggestedHost); !sts.IsSuccess() { - // trigger un-reserve to clean up state associated with the reserved Pod - fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost) - if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil { - logger.Error(forgetErr, "Scheduler cache ForgetPod failed") - } - - if sts.IsRejected() { - fitErr := &framework.FitError{ - NumAllNodes: 1, - Pod: pod, - Diagnosis: framework.Diagnosis{ - NodeToStatus: framework.NewDefaultNodeToStatus(), - }, - } - fitErr.Diagnosis.NodeToStatus.Set(scheduleResult.SuggestedHost, sts) - fitErr.Diagnosis.AddPluginStatus(sts) - return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, framework.NewStatus(sts.Code()).WithError(fitErr) - } - return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, sts - } - - // Run "permit" plugins. - runPermitStatus := fwk.RunPermitPlugins(ctx, state, assumedPod, scheduleResult.SuggestedHost) - if !runPermitStatus.IsWait() && !runPermitStatus.IsSuccess() { - // trigger un-reserve to clean up state associated with the reserved Pod - fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost) - if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil { - logger.Error(forgetErr, "Scheduler cache ForgetPod failed") - } - - if runPermitStatus.IsRejected() { - fitErr := &framework.FitError{ - NumAllNodes: 1, - Pod: pod, - Diagnosis: framework.Diagnosis{ - NodeToStatus: framework.NewDefaultNodeToStatus(), - }, - } - fitErr.Diagnosis.NodeToStatus.Set(scheduleResult.SuggestedHost, runPermitStatus) - fitErr.Diagnosis.AddPluginStatus(runPermitStatus) - return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, framework.NewStatus(runPermitStatus.Code()).WithError(fitErr) - } - - return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, runPermitStatus - } - - // At the end of a successful scheduling cycle, pop and move up Pods if needed. - if len(podsToActivate.Map) != 0 { - sched.SchedulingQueue.Activate(logger, podsToActivate.Map) - // Clear the entries after activation. - podsToActivate.Map = make(map[string]*v1.Pod) - } - - return scheduleResult, assumedPodInfo, nil -} - -// bindingCycle tries to bind an assumed Pod. -func (sched *Scheduler) bindingCycle( - ctx context.Context, - state *framework.CycleState, - fwk framework.Framework, - scheduleResult ScheduleResult, - assumedPodInfo *framework.QueuedPodInfo, - start time.Time, - podsToActivate *framework.PodsToActivate) *framework.Status { - logger := klog.FromContext(ctx) - - assumedPod := assumedPodInfo.Pod - - // Run "permit" plugins. - if status := fwk.WaitOnPermit(ctx, assumedPod); !status.IsSuccess() { - if status.IsRejected() { - fitErr := &framework.FitError{ - NumAllNodes: 1, - Pod: assumedPodInfo.Pod, - Diagnosis: framework.Diagnosis{ - NodeToStatus: framework.NewDefaultNodeToStatus(), - UnschedulablePlugins: sets.New(status.Plugin()), - }, - } - fitErr.Diagnosis.NodeToStatus.Set(scheduleResult.SuggestedHost, status) - return framework.NewStatus(status.Code()).WithError(fitErr) - } - return status - } - - // Any failures after this point cannot lead to the Pod being considered unschedulable. - // We define the Pod as "unschedulable" only when Pods are rejected at specific extension points, and Permit is the last one in the scheduling/binding cycle. 
- // If a Pod fails on PreBind or Bind, it should be moved to BackoffQ for retry. - // - // We can call Done() here because - // we can free the cluster events stored in the scheduling queue sooner, which is worth for busy clusters memory consumption wise. - sched.SchedulingQueue.Done(assumedPod.UID) - - // Run "prebind" plugins. - if status := fwk.RunPreBindPlugins(ctx, state, assumedPod, scheduleResult.SuggestedHost); !status.IsSuccess() { - return status - } - - // Run "bind" plugins. - if status := sched.bind(ctx, fwk, assumedPod, scheduleResult.SuggestedHost, state); !status.IsSuccess() { - return status - } - - // Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2. - logger.V(2).Info("Successfully bound pod to node", "pod", klog.KObj(assumedPod), "node", scheduleResult.SuggestedHost, "evaluatedNodes", scheduleResult.EvaluatedNodes, "feasibleNodes", scheduleResult.FeasibleNodes) - metrics.PodScheduled(fwk.ProfileName(), metrics.SinceInSeconds(start)) - metrics.PodSchedulingAttempts.Observe(float64(assumedPodInfo.Attempts)) - if assumedPodInfo.InitialAttemptTimestamp != nil { - metrics.PodSchedulingSLIDuration.WithLabelValues(getAttemptsLabel(assumedPodInfo)).Observe(metrics.SinceInSeconds(*assumedPodInfo.InitialAttemptTimestamp)) - } - // Run "postbind" plugins. - fwk.RunPostBindPlugins(ctx, state, assumedPod, scheduleResult.SuggestedHost) - - // At the end of a successful binding cycle, move up Pods if needed. - if len(podsToActivate.Map) != 0 { - sched.SchedulingQueue.Activate(logger, podsToActivate.Map) - // Unlike the logic in schedulingCycle(), we don't bother deleting the entries - // as `podsToActivate.Map` is no longer consumed. - } - - return nil -} - -func (sched *Scheduler) handleBindingCycleError( - ctx context.Context, - state *framework.CycleState, - fwk framework.Framework, - podInfo *framework.QueuedPodInfo, - start time.Time, - scheduleResult ScheduleResult, - status *framework.Status) { - logger := klog.FromContext(ctx) - - assumedPod := podInfo.Pod - // trigger un-reserve plugins to clean up state associated with the reserved Pod - fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost) - if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil { - logger.Error(forgetErr, "scheduler cache ForgetPod failed") - } else { - // "Forget"ing an assumed Pod in binding cycle should be treated as a PodDelete event, - // as the assumed Pod had occupied a certain amount of resources in scheduler cache. - // - // Avoid moving the assumed Pod itself as it's always Unschedulable. - // It's intentional to "defer" this operation; otherwise MoveAllToActiveOrBackoffQueue() would - // add this event to in-flight events and thus move the assumed pod to backoffQ anyways if the plugins don't have appropriate QueueingHint. 
- if status.IsRejected() { - defer sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, assumedPod, nil, func(pod *v1.Pod) bool { - return assumedPod.UID != pod.UID - }) - } else { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, assumedPod, nil, nil) - } - } - - sched.FailureHandler(ctx, fwk, podInfo, status, clearNominatedNode, start) -} - -func (sched *Scheduler) frameworkForPod(pod *v1.Pod) (framework.Framework, error) { - fwk, ok := sched.Profiles[pod.Spec.SchedulerName] - if !ok { - return nil, fmt.Errorf("profile not found for scheduler name %q", pod.Spec.SchedulerName) - } - return fwk, nil -} - -// skipPodSchedule returns true if we could skip scheduling the pod for specified cases. -func (sched *Scheduler) skipPodSchedule(ctx context.Context, fwk framework.Framework, pod *v1.Pod) bool { - // Case 1: pod is being deleted. - if pod.DeletionTimestamp != nil { - fwk.EventRecorder().Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) - klog.FromContext(ctx).V(3).Info("Skip schedule deleting pod", "pod", klog.KObj(pod)) - return true - } - - // Case 2: pod that has been assumed could be skipped. - // An assumed pod can be added again to the scheduling queue if it got an update event - // during its previous scheduling cycle but before getting assumed. - isAssumed, err := sched.Cache.IsAssumedPod(pod) - if err != nil { - // TODO(91633): pass ctx into a revised HandleError - utilruntime.HandleError(fmt.Errorf("failed to check whether pod %s/%s is assumed: %v", pod.Namespace, pod.Name, err)) - return false - } - return isAssumed -} - -// schedulePod tries to schedule the given pod to one of the nodes in the node list. -// If it succeeds, it will return the name of the node. -// If it fails, it will return a FitError with reasons. -func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) { - trace := utiltrace.New("Scheduling", utiltrace.Field{Key: "namespace", Value: pod.Namespace}, utiltrace.Field{Key: "name", Value: pod.Name}) - defer trace.LogIfLong(100 * time.Millisecond) - if err := sched.Cache.UpdateSnapshot(klog.FromContext(ctx), sched.nodeInfoSnapshot); err != nil { - return result, err - } - trace.Step("Snapshotting scheduler cache and node infos done") - - if sched.nodeInfoSnapshot.NumNodes() == 0 { - return result, ErrNoNodesAvailable - } - - feasibleNodes, diagnosis, err := sched.findNodesThatFitPod(ctx, fwk, state, pod) - if err != nil { - return result, err - } - trace.Step("Computing predicates done") - - if len(feasibleNodes) == 0 { - return result, &framework.FitError{ - Pod: pod, - NumAllNodes: sched.nodeInfoSnapshot.NumNodes(), - Diagnosis: diagnosis, - } - } - - // When only one node after predicate, just use it. 
- if len(feasibleNodes) == 1 { - return ScheduleResult{ - SuggestedHost: feasibleNodes[0].Node().Name, - EvaluatedNodes: 1 + diagnosis.NodeToStatus.Len(), - FeasibleNodes: 1, - }, nil - } - - priorityList, err := prioritizeNodes(ctx, sched.Extenders, fwk, state, pod, feasibleNodes) - if err != nil { - return result, err - } - - host, _, err := selectHost(priorityList, numberOfHighestScoredNodesToReport) - trace.Step("Prioritizing done") - - return ScheduleResult{ - SuggestedHost: host, - EvaluatedNodes: len(feasibleNodes) + diagnosis.NodeToStatus.Len(), - FeasibleNodes: len(feasibleNodes), - }, err -} - -// Filters the nodes to find the ones that fit the pod based on the framework -// filter plugins and filter extenders. -func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*framework.NodeInfo, framework.Diagnosis, error) { - logger := klog.FromContext(ctx) - diagnosis := framework.Diagnosis{ - NodeToStatus: framework.NewDefaultNodeToStatus(), - } - - allNodes, err := sched.nodeInfoSnapshot.NodeInfos().List() - if err != nil { - return nil, diagnosis, err - } - // Run "prefilter" plugins. - preRes, s, unscheduledPlugins := fwk.RunPreFilterPlugins(ctx, state, pod) - diagnosis.UnschedulablePlugins = unscheduledPlugins - if !s.IsSuccess() { - if !s.IsRejected() { - return nil, diagnosis, s.AsError() - } - // All nodes in NodeToStatus will have the same status so that they can be handled in the preemption. - diagnosis.NodeToStatus.SetAbsentNodesStatus(s) - - // Record the messages from PreFilter in Diagnosis.PreFilterMsg. - msg := s.Message() - diagnosis.PreFilterMsg = msg - logger.V(5).Info("Status after running PreFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) - diagnosis.AddPluginStatus(s) - return nil, diagnosis, nil - } - - // "NominatedNodeName" can potentially be set in a previous scheduling cycle as a result of preemption. - // This node is likely the only candidate that will fit the pod, and hence we try it first before iterating over all nodes. - if len(pod.Status.NominatedNodeName) > 0 { - feasibleNodes, err := sched.evaluateNominatedNode(ctx, pod, fwk, state, diagnosis) - if err != nil { - logger.Error(err, "Evaluation failed on nominated node", "pod", klog.KObj(pod), "node", pod.Status.NominatedNodeName) - } - // Nominated node passes all the filters, scheduler is good to assign this node to the pod. - if len(feasibleNodes) != 0 { - return feasibleNodes, diagnosis, nil - } - } - - nodes := allNodes - if !preRes.AllNodes() { - nodes = make([]*framework.NodeInfo, 0, len(preRes.NodeNames)) - for nodeName := range preRes.NodeNames { - // PreRes may return nodeName(s) which do not exist; we verify - // node exists in the Snapshot. 
- if nodeInfo, err := sched.nodeInfoSnapshot.Get(nodeName); err == nil { - nodes = append(nodes, nodeInfo) - } - } - diagnosis.NodeToStatus.SetAbsentNodesStatus(framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node(s) didn't satisfy plugin(s) %v", sets.List(unscheduledPlugins)))) - } - feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, &diagnosis, nodes) - // always try to update the sched.nextStartNodeIndex regardless of whether an error has occurred - // this is helpful to make sure that all the nodes have a chance to be searched - processedNodes := len(feasibleNodes) + diagnosis.NodeToStatus.Len() - sched.nextStartNodeIndex = (sched.nextStartNodeIndex + processedNodes) % len(allNodes) - if err != nil { - return nil, diagnosis, err - } - - feasibleNodesAfterExtender, err := findNodesThatPassExtenders(ctx, sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatus) - if err != nil { - return nil, diagnosis, err - } - if len(feasibleNodesAfterExtender) != len(feasibleNodes) { - // Extenders filtered out some nodes. - // - // Extender doesn't support any kind of requeueing feature like EnqueueExtensions in the scheduling framework. - // When Extenders reject some Nodes and the pod ends up being unschedulable, - // we put framework.ExtenderName to pInfo.UnschedulablePlugins. - // This Pod will be requeued from unschedulable pod pool to activeQ/backoffQ - // by any kind of cluster events. - // https://github.com/kubernetes/kubernetes/issues/122019 - if diagnosis.UnschedulablePlugins == nil { - diagnosis.UnschedulablePlugins = sets.New[string]() - } - diagnosis.UnschedulablePlugins.Insert(framework.ExtenderName) - } - - return feasibleNodesAfterExtender, diagnosis, nil -} - -func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, fwk framework.Framework, state *framework.CycleState, diagnosis framework.Diagnosis) ([]*framework.NodeInfo, error) { - nnn := pod.Status.NominatedNodeName - nodeInfo, err := sched.nodeInfoSnapshot.Get(nnn) - if err != nil { - return nil, err - } - node := []*framework.NodeInfo{nodeInfo} - feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, &diagnosis, node) - if err != nil { - return nil, err - } - - feasibleNodes, err = findNodesThatPassExtenders(ctx, sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatus) - if err != nil { - return nil, err - } - - return feasibleNodes, nil -} - -// hasScoring checks if scoring nodes is configured. -func (sched *Scheduler) hasScoring(fwk framework.Framework) bool { - if fwk.HasScorePlugins() { - return true - } - for _, extender := range sched.Extenders { - if extender.IsPrioritizer() { - return true - } - } - return false -} - -// hasExtenderFilters checks if any extenders filter nodes. -func (sched *Scheduler) hasExtenderFilters() bool { - for _, extender := range sched.Extenders { - if extender.IsFilter() { - return true - } - } - return false -} - -// findNodesThatPassFilters finds the nodes that fit the filter plugins. 
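Ahead of the findNodesThatPassFilters implementation below, a standalone sketch of the adaptive sampling rule it pairs with (numFeasibleNodesToFind): small clusters are searched exhaustively, larger ones are sampled at 50 - n/125 percent, clamped to at least 5% and at least 100 nodes, per the constants defined at the top of this file. The function and the values in main are only a worked illustration of that formula, not scheduler API:

package main

import "fmt"

const (
	minFeasibleNodesToFind           = 100
	minFeasibleNodesPercentageToFind = 5
)

// numNodesToSample mirrors the adaptive rule: search everything in small
// clusters, otherwise sample a percentage that shrinks as the cluster grows.
func numNodesToSample(numAllNodes int32) int32 {
	if numAllNodes < minFeasibleNodesToFind {
		return numAllNodes
	}
	percentage := int32(50) - numAllNodes/125
	if percentage < minFeasibleNodesPercentageToFind {
		percentage = minFeasibleNodesPercentageToFind
	}
	numNodes := numAllNodes * percentage / 100
	if numNodes < minFeasibleNodesToFind {
		return minFeasibleNodesToFind
	}
	return numNodes
}

func main() {
	for _, n := range []int32{50, 200, 500, 1000, 5000} {
		fmt.Printf("%d nodes -> sample %d\n", n, numNodesToSample(n))
	}
	// 50   -> 50   (below the 100-node floor, search everything)
	// 200  -> 100  (49% gives 98, clamped up to the 100-node floor)
	// 500  -> 230  (percentage = 50 - 4 = 46)
	// 1000 -> 420  (percentage = 50 - 8 = 42)
	// 5000 -> 500  (percentage = 50 - 40 = 10)
}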
-func (sched *Scheduler) findNodesThatPassFilters( - ctx context.Context, - fwk framework.Framework, - state *framework.CycleState, - pod *v1.Pod, - diagnosis *framework.Diagnosis, - nodes []*framework.NodeInfo) ([]*framework.NodeInfo, error) { - numAllNodes := len(nodes) - numNodesToFind := sched.numFeasibleNodesToFind(fwk.PercentageOfNodesToScore(), int32(numAllNodes)) - if !sched.hasExtenderFilters() && !sched.hasScoring(fwk) { - numNodesToFind = 1 - } - - // Create feasible list with enough space to avoid growing it - // and allow assigning. - feasibleNodes := make([]*framework.NodeInfo, numNodesToFind) - - if !fwk.HasFilterPlugins() { - for i := range feasibleNodes { - feasibleNodes[i] = nodes[(sched.nextStartNodeIndex+i)%numAllNodes] - } - return feasibleNodes, nil - } - - errCh := parallelize.NewErrorChannel() - var feasibleNodesLen int32 - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - type nodeStatus struct { - node string - status *framework.Status - } - result := make([]*nodeStatus, numAllNodes) - checkNode := func(i int) { - // We check the nodes starting from where we left off in the previous scheduling cycle, - // this is to make sure all nodes have the same chance of being examined across pods. - nodeInfo := nodes[(sched.nextStartNodeIndex+i)%numAllNodes] - status := fwk.RunFilterPluginsWithNominatedPods(ctx, state, pod, nodeInfo) - if status.Code() == framework.Error { - errCh.SendErrorWithCancel(status.AsError(), cancel) - return - } - if status.IsSuccess() { - length := atomic.AddInt32(&feasibleNodesLen, 1) - if length > numNodesToFind { - cancel() - atomic.AddInt32(&feasibleNodesLen, -1) - } else { - feasibleNodes[length-1] = nodeInfo - } - } else { - result[i] = &nodeStatus{node: nodeInfo.Node().Name, status: status} - } - } - - beginCheckNode := time.Now() - statusCode := framework.Success - defer func() { - // We record Filter extension point latency here instead of in framework.go because framework.RunFilterPlugins - // function is called for each node, whereas we want to have an overall latency for all nodes per scheduling cycle. - // Note that this latency also includes latency for `addNominatedPods`, which calls framework.RunPreFilterAddPod. - metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Filter, statusCode.String(), fwk.ProfileName()).Observe(metrics.SinceInSeconds(beginCheckNode)) - }() - - // Stops searching for more nodes once the configured number of feasible nodes - // are found. - fwk.Parallelizer().Until(ctx, numAllNodes, checkNode, metrics.Filter) - feasibleNodes = feasibleNodes[:feasibleNodesLen] - for _, item := range result { - if item == nil { - continue - } - diagnosis.NodeToStatus.Set(item.node, item.status) - diagnosis.AddPluginStatus(item.status) - } - if err := errCh.ReceiveError(); err != nil { - statusCode = framework.Error - return feasibleNodes, err - } - return feasibleNodes, nil -} - -// numFeasibleNodesToFind returns the number of feasible nodes that once found, the scheduler stops -// its search for more feasible nodes. -func (sched *Scheduler) numFeasibleNodesToFind(percentageOfNodesToScore *int32, numAllNodes int32) (numNodes int32) { - if numAllNodes < minFeasibleNodesToFind { - return numAllNodes - } - - // Use profile percentageOfNodesToScore if it's set. Otherwise, use global percentageOfNodesToScore. 
- var percentage int32
- if percentageOfNodesToScore != nil {
- percentage = *percentageOfNodesToScore
- } else {
- percentage = sched.percentageOfNodesToScore
- }
-
- if percentage == 0 {
- percentage = int32(50) - numAllNodes/125
- if percentage < minFeasibleNodesPercentageToFind {
- percentage = minFeasibleNodesPercentageToFind
- }
- }
-
- numNodes = numAllNodes * percentage / 100
- if numNodes < minFeasibleNodesToFind {
- return minFeasibleNodesToFind
- }
-
- return numNodes
-}
-
-func findNodesThatPassExtenders(ctx context.Context, extenders []framework.Extender, pod *v1.Pod, feasibleNodes []*framework.NodeInfo, statuses *framework.NodeToStatus) ([]*framework.NodeInfo, error) {
- logger := klog.FromContext(ctx)
-
- // Extenders are called sequentially.
- // Nodes in original feasibleNodes can be excluded in one extender, and pass on to the next
- // extender in a decreasing manner.
- for _, extender := range extenders {
- if len(feasibleNodes) == 0 {
- break
- }
- if !extender.IsInterested(pod) {
- continue
- }
-
- // Status of failed nodes in failedAndUnresolvableMap will be added to <statuses>,
- // so that the scheduler framework can respect the UnschedulableAndUnresolvable status for
- // particular nodes, and this may eventually improve preemption efficiency.
- // Note: users are recommended to configure the extenders that may return UnschedulableAndUnresolvable
- // status ahead of others.
- feasibleList, failedMap, failedAndUnresolvableMap, err := extender.Filter(pod, feasibleNodes)
- if err != nil {
- if extender.IsIgnorable() {
- logger.Info("Skipping extender as it returned error and has ignorable flag set", "extender", extender, "err", err)
- continue
- }
- return nil, err
- }
-
- for failedNodeName, failedMsg := range failedAndUnresolvableMap {
- statuses.Set(failedNodeName, framework.NewStatus(framework.UnschedulableAndUnresolvable, failedMsg))
- }
-
- for failedNodeName, failedMsg := range failedMap {
- if _, found := failedAndUnresolvableMap[failedNodeName]; found {
- // failedAndUnresolvableMap takes precedence over failedMap
- // note that this only happens if the extender returns the node in both maps
- continue
- }
- statuses.Set(failedNodeName, framework.NewStatus(framework.Unschedulable, failedMsg))
- }
-
- feasibleNodes = feasibleList
- }
- return feasibleNodes, nil
-}
-
-// prioritizeNodes prioritizes the nodes by running the score plugins,
-// which return a score for each node from the call to RunScorePlugins().
-// The scores from each plugin are added together to make the score for that node, then
-// any extenders are run as well.
-// All scores are finally combined (added) to get the total weighted scores of all nodes
-func prioritizeNodes(
- ctx context.Context,
- extenders []framework.Extender,
- fwk framework.Framework,
- state *framework.CycleState,
- pod *v1.Pod,
- nodes []*framework.NodeInfo,
-) ([]framework.NodePluginScores, error) {
- logger := klog.FromContext(ctx)
- // If no priority configs are provided, then all nodes will have a score of one.
- // This is required to generate the priority list in the required format
- if len(extenders) == 0 && !fwk.HasScorePlugins() {
- result := make([]framework.NodePluginScores, 0, len(nodes))
- for i := range nodes {
- result = append(result, framework.NodePluginScores{
- Name: nodes[i].Node().Name,
- TotalScore: 1,
- })
- }
- return result, nil
- }
-
- // Run PreScore plugins.
- preScoreStatus := fwk.RunPreScorePlugins(ctx, state, pod, nodes) - if !preScoreStatus.IsSuccess() { - return nil, preScoreStatus.AsError() - } - - // Run the Score plugins. - nodesScores, scoreStatus := fwk.RunScorePlugins(ctx, state, pod, nodes) - if !scoreStatus.IsSuccess() { - return nil, scoreStatus.AsError() - } - - // Additional details logged at level 10 if enabled. - loggerVTen := logger.V(10) - if loggerVTen.Enabled() { - for _, nodeScore := range nodesScores { - for _, pluginScore := range nodeScore.Scores { - loggerVTen.Info("Plugin scored node for pod", "pod", klog.KObj(pod), "plugin", pluginScore.Name, "node", nodeScore.Name, "score", pluginScore.Score) - } - } - } - - if len(extenders) != 0 && nodes != nil { - // allNodeExtendersScores has all extenders scores for all nodes. - // It is keyed with node name. - allNodeExtendersScores := make(map[string]*framework.NodePluginScores, len(nodes)) - var mu sync.Mutex - var wg sync.WaitGroup - for i := range extenders { - if !extenders[i].IsInterested(pod) { - continue - } - wg.Add(1) - go func(extIndex int) { - metrics.Goroutines.WithLabelValues(metrics.PrioritizingExtender).Inc() - defer func() { - metrics.Goroutines.WithLabelValues(metrics.PrioritizingExtender).Dec() - wg.Done() - }() - prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes) - if err != nil { - // Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities - logger.V(5).Info("Failed to run extender's priority function. No score given by this extender.", "error", err, "pod", klog.KObj(pod), "extender", extenders[extIndex].Name()) - return - } - mu.Lock() - defer mu.Unlock() - for i := range *prioritizedList { - nodename := (*prioritizedList)[i].Host - score := (*prioritizedList)[i].Score - if loggerVTen.Enabled() { - loggerVTen.Info("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", nodename, "score", score) - } - - // MaxExtenderPriority may diverge from the max priority used in the scheduler and defined by MaxNodeScore, - // therefore we need to scale the score returned by extenders to the score range used by the scheduler. - finalscore := score * weight * (framework.MaxNodeScore / extenderv1.MaxExtenderPriority) - - if allNodeExtendersScores[nodename] == nil { - allNodeExtendersScores[nodename] = &framework.NodePluginScores{ - Name: nodename, - Scores: make([]framework.PluginScore, 0, len(extenders)), - } - } - allNodeExtendersScores[nodename].Scores = append(allNodeExtendersScores[nodename].Scores, framework.PluginScore{ - Name: extenders[extIndex].Name(), - Score: finalscore, - }) - allNodeExtendersScores[nodename].TotalScore += finalscore - } - }(i) - } - // wait for all go routines to finish - wg.Wait() - for i := range nodesScores { - if score, ok := allNodeExtendersScores[nodes[i].Node().Name]; ok { - nodesScores[i].Scores = append(nodesScores[i].Scores, score.Scores...) - nodesScores[i].TotalScore += score.TotalScore - } - } - } - - if loggerVTen.Enabled() { - for i := range nodesScores { - loggerVTen.Info("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", nodesScores[i].Name, "score", nodesScores[i].TotalScore) - } - } - return nodesScores, nil -} - -var errEmptyPriorityList = errors.New("empty priorityList") - -// selectHost takes a prioritized list of nodes and then picks one -// in a reservoir sampling manner from the nodes that had the highest score. 
-// It also returns the top {count} Nodes, -// and the top of the list will be always the selected host. -func selectHost(nodeScoreList []framework.NodePluginScores, count int) (string, []framework.NodePluginScores, error) { - if len(nodeScoreList) == 0 { - return "", nil, errEmptyPriorityList - } - - var h nodeScoreHeap = nodeScoreList - heap.Init(&h) - cntOfMaxScore := 1 - selectedIndex := 0 - // The top of the heap is the NodeScoreResult with the highest score. - sortedNodeScoreList := make([]framework.NodePluginScores, 0, count) - sortedNodeScoreList = append(sortedNodeScoreList, heap.Pop(&h).(framework.NodePluginScores)) - - // This for-loop will continue until all Nodes with the highest scores get checked for a reservoir sampling, - // and sortedNodeScoreList gets (count - 1) elements. - for ns := heap.Pop(&h).(framework.NodePluginScores); ; ns = heap.Pop(&h).(framework.NodePluginScores) { - if ns.TotalScore != sortedNodeScoreList[0].TotalScore && len(sortedNodeScoreList) == count { - break - } - - if ns.TotalScore == sortedNodeScoreList[0].TotalScore { - cntOfMaxScore++ - if rand.Intn(cntOfMaxScore) == 0 { - // Replace the candidate with probability of 1/cntOfMaxScore - selectedIndex = cntOfMaxScore - 1 - } - } - - sortedNodeScoreList = append(sortedNodeScoreList, ns) - - if h.Len() == 0 { - break - } - } - - if selectedIndex != 0 { - // replace the first one with selected one - previous := sortedNodeScoreList[0] - sortedNodeScoreList[0] = sortedNodeScoreList[selectedIndex] - sortedNodeScoreList[selectedIndex] = previous - } - - if len(sortedNodeScoreList) > count { - sortedNodeScoreList = sortedNodeScoreList[:count] - } - - return sortedNodeScoreList[0].Name, sortedNodeScoreList, nil -} - -// nodeScoreHeap is a heap of framework.NodePluginScores. -type nodeScoreHeap []framework.NodePluginScores - -// nodeScoreHeap implements heap.Interface. -var _ heap.Interface = &nodeScoreHeap{} - -func (h nodeScoreHeap) Len() int { return len(h) } -func (h nodeScoreHeap) Less(i, j int) bool { return h[i].TotalScore > h[j].TotalScore } -func (h nodeScoreHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } - -func (h *nodeScoreHeap) Push(x interface{}) { - *h = append(*h, x.(framework.NodePluginScores)) -} - -func (h *nodeScoreHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - -// assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous. -// assume modifies `assumed`. -func (sched *Scheduler) assume(logger klog.Logger, assumed *v1.Pod, host string) error { - // Optimistically assume that the binding will succeed and send it to apiserver - // in the background. - // If the binding fails, scheduler will release resources allocated to assumed pod - // immediately. - assumed.Spec.NodeName = host - - if err := sched.Cache.AssumePod(logger, assumed); err != nil { - logger.Error(err, "Scheduler cache AssumePod failed") - return err - } - // if "assumed" is a nominated pod, we should remove it from internal cache - if sched.SchedulingQueue != nil { - sched.SchedulingQueue.DeleteNominatedPodIfExists(assumed) - } - - return nil -} - -// bind binds a pod to a given node defined in a binding object. -// The precedence for binding is: (1) extenders and (2) framework plugins. -// We expect this to run asynchronously, so we handle binding metrics internally. 
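selectHost above breaks ties among equally top-scored nodes with reservoir sampling: the k-th node seen with the current best score replaces the candidate with probability 1/k, so each top scorer is selected uniformly. Before the bind implementation that follows, a simplified standalone sketch of just that tie-break; pickAmongTies is an invented name and this is not the vendored heap-based implementation:

package main

import (
	"fmt"
	"math/rand"
)

// pickAmongTies walks the candidates once, keeping a single-element
// "reservoir" among the nodes tied at the best score seen so far.
func pickAmongTies(scores map[string]int64) string {
	var best string
	var bestScore int64
	ties := 0
	for name, score := range scores {
		switch {
		case best == "" || score > bestScore:
			// New best score: reset the reservoir to this node.
			best, bestScore, ties = name, score, 1
		case score == bestScore:
			ties++
			if rand.Intn(ties) == 0 { // replace with probability 1/ties
				best = name
			}
		}
	}
	return best
}

func main() {
	scores := map[string]int64{"node-a": 90, "node-b": 90, "node-c": 40}
	// Prints node-a or node-b with equal probability; node-c never wins.
	fmt.Println("selected:", pickAmongTies(scores))
}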
-func (sched *Scheduler) bind(ctx context.Context, fwk framework.Framework, assumed *v1.Pod, targetNode string, state *framework.CycleState) (status *framework.Status) { - logger := klog.FromContext(ctx) - defer func() { - sched.finishBinding(logger, fwk, assumed, targetNode, status) - }() - - bound, err := sched.extendersBinding(logger, assumed, targetNode) - if bound { - return framework.AsStatus(err) - } - return fwk.RunBindPlugins(ctx, state, assumed, targetNode) -} - -// TODO(#87159): Move this to a Plugin. -func (sched *Scheduler) extendersBinding(logger klog.Logger, pod *v1.Pod, node string) (bool, error) { - for _, extender := range sched.Extenders { - if !extender.IsBinder() || !extender.IsInterested(pod) { - continue - } - err := extender.Bind(&v1.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name, UID: pod.UID}, - Target: v1.ObjectReference{Kind: "Node", Name: node}, - }) - if err != nil && extender.IsIgnorable() { - logger.Info("Skipping extender in bind as it returned error and has ignorable flag set", "extender", extender, "err", err) - continue - } - return true, err - } - return false, nil -} - -func (sched *Scheduler) finishBinding(logger klog.Logger, fwk framework.Framework, assumed *v1.Pod, targetNode string, status *framework.Status) { - if finErr := sched.Cache.FinishBinding(logger, assumed); finErr != nil { - logger.Error(finErr, "Scheduler cache FinishBinding failed") - } - if !status.IsSuccess() { - logger.V(1).Info("Failed to bind pod", "pod", klog.KObj(assumed)) - return - } - - fwk.EventRecorder().Eventf(assumed, nil, v1.EventTypeNormal, "Scheduled", "Binding", "Successfully assigned %v/%v to %v", assumed.Namespace, assumed.Name, targetNode) -} - -func getAttemptsLabel(p *framework.QueuedPodInfo) string { - // We breakdown the pod scheduling duration by attempts capped to a limit - // to avoid ending up with a high cardinality metric. - if p.Attempts >= 15 { - return "15+" - } - return strconv.Itoa(p.Attempts) -} - -// handleSchedulingFailure records an event for the pod that indicates the -// pod has failed to schedule. Also, update the pod condition and nominated node name if set. -func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) { - calledDone := false - defer func() { - if !calledDone { - // Basically, AddUnschedulableIfNotPresent calls DonePod internally. - // But, AddUnschedulableIfNotPresent isn't called in some corner cases. - // Here, we call DonePod explicitly to avoid leaking the pod. - sched.SchedulingQueue.Done(podInfo.Pod.UID) - } - }() - - logger := klog.FromContext(ctx) - reason := v1.PodReasonSchedulerError - if status.IsRejected() { - reason = v1.PodReasonUnschedulable - } - - switch reason { - case v1.PodReasonUnschedulable: - metrics.PodUnschedulable(fwk.ProfileName(), metrics.SinceInSeconds(start)) - case v1.PodReasonSchedulerError: - metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start)) - } - - pod := podInfo.Pod - err := status.AsError() - errMsg := status.Message() - - if err == ErrNoNodesAvailable { - logger.V(2).Info("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod)) - } else if fitError, ok := err.(*framework.FitError); ok { // Inject UnschedulablePlugins to PodInfo, which will be used later for moving Pods between queues efficiently. 
- podInfo.UnschedulablePlugins = fitError.Diagnosis.UnschedulablePlugins - podInfo.PendingPlugins = fitError.Diagnosis.PendingPlugins - logger.V(2).Info("Unable to schedule pod; no fit; waiting", "pod", klog.KObj(pod), "err", errMsg) - } else { - logger.Error(err, "Error scheduling pod; retrying", "pod", klog.KObj(pod)) - } - - // Check if the Pod exists in informer cache. - podLister := fwk.SharedInformerFactory().Core().V1().Pods().Lister() - cachedPod, e := podLister.Pods(pod.Namespace).Get(pod.Name) - if e != nil { - logger.Info("Pod doesn't exist in informer cache", "pod", klog.KObj(pod), "err", e) - // We need to call DonePod here because we don't call AddUnschedulableIfNotPresent in this case. - } else { - // In the case of extender, the pod may have been bound successfully, but timed out returning its response to the scheduler. - // It could result in the live version to carry .spec.nodeName, and that's inconsistent with the internal-queued version. - if len(cachedPod.Spec.NodeName) != 0 { - logger.Info("Pod has been assigned to node. Abort adding it back to queue.", "pod", klog.KObj(pod), "node", cachedPod.Spec.NodeName) - // We need to call DonePod here because we don't call AddUnschedulableIfNotPresent in this case. - } else { - // As is from SharedInformer, we need to do a DeepCopy() here. - // ignore this err since apiserver doesn't properly validate affinity terms - // and we can't fix the validation for backwards compatibility. - podInfo.PodInfo, _ = framework.NewPodInfo(cachedPod.DeepCopy()) - if err := sched.SchedulingQueue.AddUnschedulableIfNotPresent(logger, podInfo, sched.SchedulingQueue.SchedulingCycle()); err != nil { - logger.Error(err, "Error occurred") - } - calledDone = true - } - } - - // Update the scheduling queue with the nominated pod information. Without - // this, there would be a race condition between the next scheduling cycle - // and the time the scheduler receives a Pod Update for the nominated pod. - // Here we check for nil only for tests. - if sched.SchedulingQueue != nil { - sched.SchedulingQueue.AddNominatedPod(logger, podInfo.PodInfo, nominatingInfo) - } - - if err == nil { - // Only tests can reach here. - return - } - - msg := truncateMessage(errMsg) - fwk.EventRecorder().Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", msg) - if err := updatePod(ctx, sched.client, pod, &v1.PodCondition{ - Type: v1.PodScheduled, - ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(&pod.Status, pod.Generation, v1.PodScheduled), - Status: v1.ConditionFalse, - Reason: reason, - Message: errMsg, - }, nominatingInfo); err != nil { - logger.Error(err, "Error updating pod", "pod", klog.KObj(pod)) - } -} - -// truncateMessage truncates a message if it hits the NoteLengthLimit. -func truncateMessage(message string) string { - max := validation.NoteLengthLimit - if len(message) <= max { - return message - } - suffix := " ..." - return message[:max-len(suffix)] + suffix -} - -func updatePod(ctx context.Context, client clientset.Interface, pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) error { - logger := klog.FromContext(ctx) - logger.V(3).Info("Updating pod condition", "pod", klog.KObj(pod), "conditionType", condition.Type, "conditionStatus", condition.Status, "conditionReason", condition.Reason) - podStatusCopy := pod.Status.DeepCopy() - // NominatedNodeName is updated only if we are trying to set it, and the value is - // different from the existing one. 
- nnnNeedsUpdate := nominatingInfo.Mode() == framework.ModeOverride && pod.Status.NominatedNodeName != nominatingInfo.NominatedNodeName - if !podutil.UpdatePodCondition(podStatusCopy, condition) && !nnnNeedsUpdate { - return nil - } - if nnnNeedsUpdate { - podStatusCopy.NominatedNodeName = nominatingInfo.NominatedNodeName - } - return util.PatchPodStatus(ctx, client, pod, podStatusCopy) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go deleted file mode 100644 index 0fd972fbc73..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go +++ /dev/null @@ -1,617 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduler - -import ( - "context" - "errors" - "fmt" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" - coreinformers "k8s.io/client-go/informers/core/v1" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker" - "k8s.io/klog/v2" - configv1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/features" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" - internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache" - cachedebugger "k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger" - internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" - frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources" - frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime" - "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/kubernetes/pkg/scheduler/profile" - "k8s.io/kubernetes/pkg/scheduler/util/assumecache" - "k8s.io/utils/clock" -) - -const ( - // Duration the scheduler will wait before expiring an assumed pod. - // See issue #106361 for more details about this parameter and its value. - durationToExpireAssumedPod time.Duration = 0 -) - -// ErrNoNodesAvailable is used to describe the error that no nodes available to schedule pods. -var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods") - -// Scheduler watches for new unscheduled pods. It attempts to find -// nodes that they fit on and writes bindings back to the api server. -type Scheduler struct { - // It is expected that changes made via Cache will be observed - // by NodeLister and Algorithm. 
- Cache internalcache.Cache - - Extenders []framework.Extender - - // NextPod should be a function that blocks until the next pod - // is available. We don't use a channel for this, because scheduling - // a pod may take some amount of time and we don't want pods to get - // stale while they sit in a channel. - NextPod func(logger klog.Logger) (*framework.QueuedPodInfo, error) - - // FailureHandler is called upon a scheduling failure. - FailureHandler FailureHandlerFn - - // SchedulePod tries to schedule the given pod to one of the nodes in the node list. - // Return a struct of ScheduleResult with the name of suggested host on success, - // otherwise will return a FitError with reasons. - SchedulePod func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error) - - // Close this to shut down the scheduler. - StopEverything <-chan struct{} - - // SchedulingQueue holds pods to be scheduled - SchedulingQueue internalqueue.SchedulingQueue - - // Profiles are the scheduling profiles. - Profiles profile.Map - - client clientset.Interface - - nodeInfoSnapshot *internalcache.Snapshot - - percentageOfNodesToScore int32 - - nextStartNodeIndex int - - // logger *must* be initialized when creating a Scheduler, - // otherwise logging functions will access a nil sink and - // panic. - logger klog.Logger - - // registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start. - registeredHandlers []cache.ResourceEventHandlerRegistration -} - -func (sched *Scheduler) applyDefaultHandlers() { - sched.SchedulePod = sched.schedulePod - sched.FailureHandler = sched.handleSchedulingFailure -} - -type schedulerOptions struct { - clock clock.WithTicker - componentConfigVersion string - kubeConfig *restclient.Config - // Overridden by profile level percentageOfNodesToScore if set in v1. - percentageOfNodesToScore int32 - podInitialBackoffSeconds int64 - podMaxBackoffSeconds int64 - podMaxInUnschedulablePodsDuration time.Duration - // Contains out-of-tree plugins to be merged with the in-tree registry. - frameworkOutOfTreeRegistry frameworkruntime.Registry - profiles []schedulerapi.KubeSchedulerProfile - extenders []schedulerapi.Extender - frameworkCapturer FrameworkCapturer - parallelism int32 - applyDefaultProfile bool -} - -// Option configures a Scheduler -type Option func(*schedulerOptions) - -// ScheduleResult represents the result of scheduling a pod. -type ScheduleResult struct { - // Name of the selected node. - SuggestedHost string - // The number of nodes the scheduler evaluated the pod against in the filtering - // phase and beyond. - EvaluatedNodes int - // The number of nodes out of the evaluated ones that fit the pod. - FeasibleNodes int - // The nominating info for scheduling cycle. - nominatingInfo *framework.NominatingInfo -} - -// WithComponentConfigVersion sets the component config version to the -// KubeSchedulerConfiguration version used. The string should be the full -// scheme group/version of the external type we converted from (for example -// "kubescheduler.config.k8s.io/v1") -func WithComponentConfigVersion(apiVersion string) Option { - return func(o *schedulerOptions) { - o.componentConfigVersion = apiVersion - } -} - -// WithKubeConfig sets the kube config for Scheduler. -func WithKubeConfig(cfg *restclient.Config) Option { - return func(o *schedulerOptions) { - o.kubeConfig = cfg - } -} - -// WithProfiles sets profiles for Scheduler. 
By default, there is one profile -// with the name "default-scheduler". -func WithProfiles(p ...schedulerapi.KubeSchedulerProfile) Option { - return func(o *schedulerOptions) { - o.profiles = p - o.applyDefaultProfile = false - } -} - -// WithParallelism sets the parallelism for all scheduler algorithms. Default is 16. -func WithParallelism(threads int32) Option { - return func(o *schedulerOptions) { - o.parallelism = threads - } -} - -// WithPercentageOfNodesToScore sets percentageOfNodesToScore for Scheduler. -// The default value of 0 will use an adaptive percentage: 50 - (num of nodes)/125. -func WithPercentageOfNodesToScore(percentageOfNodesToScore *int32) Option { - return func(o *schedulerOptions) { - if percentageOfNodesToScore != nil { - o.percentageOfNodesToScore = *percentageOfNodesToScore - } - } -} - -// WithFrameworkOutOfTreeRegistry sets the registry for out-of-tree plugins. Those plugins -// will be appended to the default registry. -func WithFrameworkOutOfTreeRegistry(registry frameworkruntime.Registry) Option { - return func(o *schedulerOptions) { - o.frameworkOutOfTreeRegistry = registry - } -} - -// WithPodInitialBackoffSeconds sets podInitialBackoffSeconds for Scheduler, the default value is 1 -func WithPodInitialBackoffSeconds(podInitialBackoffSeconds int64) Option { - return func(o *schedulerOptions) { - o.podInitialBackoffSeconds = podInitialBackoffSeconds - } -} - -// WithPodMaxBackoffSeconds sets podMaxBackoffSeconds for Scheduler, the default value is 10 -func WithPodMaxBackoffSeconds(podMaxBackoffSeconds int64) Option { - return func(o *schedulerOptions) { - o.podMaxBackoffSeconds = podMaxBackoffSeconds - } -} - -// WithPodMaxInUnschedulablePodsDuration sets podMaxInUnschedulablePodsDuration for PriorityQueue. -func WithPodMaxInUnschedulablePodsDuration(duration time.Duration) Option { - return func(o *schedulerOptions) { - o.podMaxInUnschedulablePodsDuration = duration - } -} - -// WithExtenders sets extenders for the Scheduler -func WithExtenders(e ...schedulerapi.Extender) Option { - return func(o *schedulerOptions) { - o.extenders = e - } -} - -// WithClock sets clock for PriorityQueue, the default clock is clock.RealClock. -func WithClock(clock clock.WithTicker) Option { - return func(o *schedulerOptions) { - o.clock = clock - } -} - -// FrameworkCapturer is used for registering a notify function in building framework. -type FrameworkCapturer func(schedulerapi.KubeSchedulerProfile) - -// WithBuildFrameworkCapturer sets a notify function for getting buildFramework details. -func WithBuildFrameworkCapturer(fc FrameworkCapturer) Option { - return func(o *schedulerOptions) { - o.frameworkCapturer = fc - } -} - -var defaultSchedulerOptions = schedulerOptions{ - clock: clock.RealClock{}, - percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore, - podInitialBackoffSeconds: int64(internalqueue.DefaultPodInitialBackoffDuration.Seconds()), - podMaxBackoffSeconds: int64(internalqueue.DefaultPodMaxBackoffDuration.Seconds()), - podMaxInUnschedulablePodsDuration: internalqueue.DefaultPodMaxInUnschedulablePodsDuration, - parallelism: int32(parallelize.DefaultParallelism), - // Ideally we would statically set the default profile here, but we can't because - // creating the default profile may require testing feature gates, which may get - // set dynamically in tests. Therefore, we delay creating it until New is actually - // invoked. 
- applyDefaultProfile: true, -} - -// New returns a Scheduler -func New(ctx context.Context, - client clientset.Interface, - informerFactory informers.SharedInformerFactory, - dynInformerFactory dynamicinformer.DynamicSharedInformerFactory, - recorderFactory profile.RecorderFactory, - opts ...Option) (*Scheduler, error) { - - logger := klog.FromContext(ctx) - stopEverything := ctx.Done() - - options := defaultSchedulerOptions - for _, opt := range opts { - opt(&options) - } - - if options.applyDefaultProfile { - var versionedCfg configv1.KubeSchedulerConfiguration - scheme.Scheme.Default(&versionedCfg) - cfg := schedulerapi.KubeSchedulerConfiguration{} - if err := scheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil { - return nil, err - } - options.profiles = cfg.Profiles - } - - registry := frameworkplugins.NewInTreeRegistry() - if err := registry.Merge(options.frameworkOutOfTreeRegistry); err != nil { - return nil, err - } - - metrics.Register() - - extenders, err := buildExtenders(logger, options.extenders, options.profiles) - if err != nil { - return nil, fmt.Errorf("couldn't build extenders: %w", err) - } - - podLister := informerFactory.Core().V1().Pods().Lister() - nodeLister := informerFactory.Core().V1().Nodes().Lister() - - snapshot := internalcache.NewEmptySnapshot() - metricsRecorder := metrics.NewMetricsAsyncRecorder(1000, time.Second, stopEverything) - // waitingPods holds all the pods that are in the scheduler and waiting in the permit stage - waitingPods := frameworkruntime.NewWaitingPodsMap() - - var resourceClaimCache *assumecache.AssumeCache - var resourceSliceTracker *resourceslicetracker.Tracker - var draManager framework.SharedDRAManager - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { - resourceClaimInformer := informerFactory.Resource().V1beta1().ResourceClaims().Informer() - resourceClaimCache = assumecache.NewAssumeCache(logger, resourceClaimInformer, "ResourceClaim", "", nil) - resourceSliceTrackerOpts := resourceslicetracker.Options{ - EnableDeviceTaints: utilfeature.DefaultFeatureGate.Enabled(features.DRADeviceTaints), - SliceInformer: informerFactory.Resource().V1beta1().ResourceSlices(), - KubeClient: client, - } - // If device taints are disabled, the additional informers are not needed and - // the tracker turns into a simple wrapper around the slice informer. 
- if resourceSliceTrackerOpts.EnableDeviceTaints { - resourceSliceTrackerOpts.TaintInformer = informerFactory.Resource().V1alpha3().DeviceTaintRules() - resourceSliceTrackerOpts.ClassInformer = informerFactory.Resource().V1beta1().DeviceClasses() - } - resourceSliceTracker, err = resourceslicetracker.StartTracker(ctx, resourceSliceTrackerOpts) - if err != nil { - return nil, fmt.Errorf("couldn't start resource slice tracker: %w", err) - } - draManager = dynamicresources.NewDRAManager(ctx, resourceClaimCache, resourceSliceTracker, informerFactory) - } - - profiles, err := profile.NewMap(ctx, options.profiles, registry, recorderFactory, - frameworkruntime.WithComponentConfigVersion(options.componentConfigVersion), - frameworkruntime.WithClientSet(client), - frameworkruntime.WithKubeConfig(options.kubeConfig), - frameworkruntime.WithInformerFactory(informerFactory), - frameworkruntime.WithSharedDRAManager(draManager), - frameworkruntime.WithSnapshotSharedLister(snapshot), - frameworkruntime.WithCaptureProfile(frameworkruntime.CaptureProfile(options.frameworkCapturer)), - frameworkruntime.WithParallelism(int(options.parallelism)), - frameworkruntime.WithExtenders(extenders), - frameworkruntime.WithMetricsRecorder(metricsRecorder), - frameworkruntime.WithWaitingPods(waitingPods), - ) - if err != nil { - return nil, fmt.Errorf("initializing profiles: %v", err) - } - - if len(profiles) == 0 { - return nil, errors.New("at least one profile is required") - } - - preEnqueuePluginMap := make(map[string][]framework.PreEnqueuePlugin) - queueingHintsPerProfile := make(internalqueue.QueueingHintMapPerProfile) - var returnErr error - for profileName, profile := range profiles { - preEnqueuePluginMap[profileName] = profile.PreEnqueuePlugins() - queueingHintsPerProfile[profileName], err = buildQueueingHintMap(ctx, profile.EnqueueExtensions()) - if err != nil { - returnErr = errors.Join(returnErr, err) - } - } - - if returnErr != nil { - return nil, returnErr - } - - podQueue := internalqueue.NewSchedulingQueue( - profiles[options.profiles[0].SchedulerName].QueueSortFunc(), - informerFactory, - internalqueue.WithClock(options.clock), - internalqueue.WithPodInitialBackoffDuration(time.Duration(options.podInitialBackoffSeconds)*time.Second), - internalqueue.WithPodMaxBackoffDuration(time.Duration(options.podMaxBackoffSeconds)*time.Second), - internalqueue.WithPodLister(podLister), - internalqueue.WithPodMaxInUnschedulablePodsDuration(options.podMaxInUnschedulablePodsDuration), - internalqueue.WithPreEnqueuePluginMap(preEnqueuePluginMap), - internalqueue.WithQueueingHintMapPerProfile(queueingHintsPerProfile), - internalqueue.WithPluginMetricsSamplePercent(pluginMetricsSamplePercent), - internalqueue.WithMetricsRecorder(*metricsRecorder), - ) - - for _, fwk := range profiles { - fwk.SetPodNominator(podQueue) - fwk.SetPodActivator(podQueue) - } - - schedulerCache := internalcache.New(ctx, durationToExpireAssumedPod) - - // Setup cache debugger. 
- debugger := cachedebugger.New(nodeLister, podLister, schedulerCache, podQueue) - debugger.ListenForSignal(ctx) - - sched := &Scheduler{ - Cache: schedulerCache, - client: client, - nodeInfoSnapshot: snapshot, - percentageOfNodesToScore: options.percentageOfNodesToScore, - Extenders: extenders, - StopEverything: stopEverything, - SchedulingQueue: podQueue, - Profiles: profiles, - logger: logger, - } - sched.NextPod = podQueue.Pop - sched.applyDefaultHandlers() - - if err = addAllEventHandlers(sched, informerFactory, dynInformerFactory, resourceClaimCache, resourceSliceTracker, unionedGVKs(queueingHintsPerProfile)); err != nil { - return nil, fmt.Errorf("adding event handlers: %w", err) - } - - return sched, nil -} - -// defaultQueueingHintFn is the default queueing hint function. -// It always returns Queue as the queueing hint. -var defaultQueueingHintFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (framework.QueueingHint, error) { - return framework.Queue, nil -} - -func buildQueueingHintMap(ctx context.Context, es []framework.EnqueueExtensions) (internalqueue.QueueingHintMap, error) { - queueingHintMap := make(internalqueue.QueueingHintMap) - var returnErr error - for _, e := range es { - events, err := e.EventsToRegister(ctx) - if err != nil { - returnErr = errors.Join(returnErr, err) - } - - // This will happen when plugin registers with empty events, it's usually the case a pod - // will become reschedulable only for self-update, e.g. schedulingGates plugin, the pod - // will enter into the activeQ via priorityQueue.Update(). - if len(events) == 0 { - continue - } - - // Note: Rarely, a plugin implements EnqueueExtensions but returns nil. - // We treat it as: the plugin is not interested in any event, and hence pod failed by that plugin - // cannot be moved by any regular cluster event. - // So, we can just ignore such EventsToRegister here. - - registerNodeAdded := false - registerNodeTaintUpdated := false - for _, event := range events { - fn := event.QueueingHintFn - if fn == nil || !utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) { - fn = defaultQueueingHintFn - } - - if event.Event.Resource == framework.Node { - if event.Event.ActionType&framework.Add != 0 { - registerNodeAdded = true - } - if event.Event.ActionType&framework.UpdateNodeTaint != 0 { - registerNodeTaintUpdated = true - } - } - - queueingHintMap[event.Event] = append(queueingHintMap[event.Event], &internalqueue.QueueingHintFunction{ - PluginName: e.Name(), - QueueingHintFn: fn, - }) - } - if registerNodeAdded && !registerNodeTaintUpdated { - // Temporally fix for the issue https://github.com/kubernetes/kubernetes/issues/109437 - // NodeAdded QueueingHint isn't always called because of preCheck. - // It's definitely not something expected for plugin developers, - // and registering UpdateNodeTaint event is the only mitigation for now. - // - // So, here registers UpdateNodeTaint event for plugins that has NodeAdded event, but don't have UpdateNodeTaint event. - // It has a bad impact for the requeuing efficiency though, a lot better than some Pods being stuch in the - // unschedulable pod pool. - // This behavior will be removed when we remove the preCheck feature. 
- // See: https://github.com/kubernetes/kubernetes/issues/110175 - queueingHintMap[framework.ClusterEvent{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}] = - append(queueingHintMap[framework.ClusterEvent{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}], - &internalqueue.QueueingHintFunction{ - PluginName: e.Name(), - QueueingHintFn: defaultQueueingHintFn, - }, - ) - } - } - if returnErr != nil { - return nil, returnErr - } - return queueingHintMap, nil -} - -// Run begins watching and scheduling. It starts scheduling and blocked until the context is done. -func (sched *Scheduler) Run(ctx context.Context) { - logger := klog.FromContext(ctx) - sched.SchedulingQueue.Run(logger) - - // We need to start scheduleOne loop in a dedicated goroutine, - // because scheduleOne function hangs on getting the next item - // from the SchedulingQueue. - // If there are no new pods to schedule, it will be hanging there - // and if done in this goroutine it will be blocking closing - // SchedulingQueue, in effect causing a deadlock on shutdown. - go wait.UntilWithContext(ctx, sched.ScheduleOne, 0) - - <-ctx.Done() - sched.SchedulingQueue.Close() - - // If the plugins satisfy the io.Closer interface, they are closed. - err := sched.Profiles.Close() - if err != nil { - logger.Error(err, "Failed to close plugins") - } -} - -// NewInformerFactory creates a SharedInformerFactory and initializes a scheduler specific -// in-place podInformer. -func NewInformerFactory(cs clientset.Interface, resyncPeriod time.Duration) informers.SharedInformerFactory { - informerFactory := informers.NewSharedInformerFactory(cs, resyncPeriod) - informerFactory.InformerFor(&v1.Pod{}, newPodInformer) - return informerFactory -} - -func buildExtenders(logger klog.Logger, extenders []schedulerapi.Extender, profiles []schedulerapi.KubeSchedulerProfile) ([]framework.Extender, error) { - var fExtenders []framework.Extender - if len(extenders) == 0 { - return nil, nil - } - - var ignoredExtendedResources []string - var ignorableExtenders []framework.Extender - for i := range extenders { - logger.V(2).Info("Creating extender", "extender", extenders[i]) - extender, err := NewHTTPExtender(&extenders[i]) - if err != nil { - return nil, err - } - if !extender.IsIgnorable() { - fExtenders = append(fExtenders, extender) - } else { - ignorableExtenders = append(ignorableExtenders, extender) - } - for _, r := range extenders[i].ManagedResources { - if r.IgnoredByScheduler { - ignoredExtendedResources = append(ignoredExtendedResources, r.Name) - } - } - } - // place ignorable extenders to the tail of extenders - fExtenders = append(fExtenders, ignorableExtenders...) - - // If there are any extended resources found from the Extenders, append them to the pluginConfig for each profile. - // This should only have an effect on ComponentConfig, where it is possible to configure Extenders and - // plugin args (and in which case the extender ignored resources take precedence). 
- if len(ignoredExtendedResources) == 0 { - return fExtenders, nil - } - - for i := range profiles { - prof := &profiles[i] - var found = false - for k := range prof.PluginConfig { - if prof.PluginConfig[k].Name == noderesources.Name { - // Update the existing args - pc := &prof.PluginConfig[k] - args, ok := pc.Args.(*schedulerapi.NodeResourcesFitArgs) - if !ok { - return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", pc.Args) - } - args.IgnoredResources = ignoredExtendedResources - found = true - break - } - } - if !found { - return nil, fmt.Errorf("can't find NodeResourcesFitArgs in plugin config") - } - } - return fExtenders, nil -} - -type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) - -func unionedGVKs(queueingHintsPerProfile internalqueue.QueueingHintMapPerProfile) map[framework.EventResource]framework.ActionType { - gvkMap := make(map[framework.EventResource]framework.ActionType) - for _, queueingHints := range queueingHintsPerProfile { - for evt := range queueingHints { - if _, ok := gvkMap[evt.Resource]; ok { - gvkMap[evt.Resource] |= evt.ActionType - } else { - gvkMap[evt.Resource] = evt.ActionType - } - } - } - return gvkMap -} - -// newPodInformer creates a shared index informer that returns only non-terminal pods. -// The PodInformer allows indexers to be added, but note that only non-conflict indexers are allowed. -func newPodInformer(cs clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - selector := fmt.Sprintf("status.phase!=%v,status.phase!=%v", v1.PodSucceeded, v1.PodFailed) - tweakListOptions := func(options *metav1.ListOptions) { - options.FieldSelector = selector - } - informer := coreinformers.NewFilteredPodInformer(cs, metav1.NamespaceAll, resyncPeriod, cache.Indexers{}, tweakListOptions) - - // Dropping `.metadata.managedFields` to improve memory usage. - // The Extract workflow (i.e. `ExtractPod`) should be unused. - trim := func(obj interface{}) (interface{}, error) { - if accessor, err := meta.Accessor(obj); err == nil { - if accessor.GetManagedFields() != nil { - accessor.SetManagedFields(nil) - } - } - return obj, nil - } - informer.SetTransform(trim) - return informer -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/assumecache/assume_cache.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/assumecache/assume_cache.go deleted file mode 100644 index c7392129cbd..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/assumecache/assume_cache.go +++ /dev/null @@ -1,531 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
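The trim transform installed at the end of scheduler.go above is worth a concrete illustration. The following standalone sketch is hypothetical (only the meta.Accessor calls from apimachinery are real API); it shows the same managedFields-dropping idea that newPodInformer applies to shrink informer cache memory:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// trim drops .metadata.managedFields, mirroring the SetTransform hook that
// newPodInformer installs before objects enter the informer cache.
func trim(obj interface{}) (interface{}, error) {
	if accessor, err := meta.Accessor(obj); err == nil {
		if accessor.GetManagedFields() != nil {
			accessor.SetManagedFields(nil)
		}
	}
	return obj, nil
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:          "demo",
		ManagedFields: []metav1.ManagedFieldsEntry{{Manager: "kubectl"}},
	}}
	out, _ := trim(pod)
	fmt.Println(len(out.(*v1.Pod).GetManagedFields())) // 0 after trimming
}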
-*/ - -package assumecache - -import ( - "errors" - "fmt" - "strconv" - "sync" - - "k8s.io/klog/v2" - - "k8s.io/apimachinery/pkg/api/meta" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/scheduler/util/queue" -) - -// Informer is the subset of [cache.SharedInformer] that NewAssumeCache depends upon. -type Informer interface { - AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) -} - -// AddTestObject adds an object to the assume cache. -// Only use this for unit testing! -func AddTestObject(cache *AssumeCache, obj interface{}) { - cache.add(obj) -} - -// UpdateTestObject updates an object in the assume cache. -// Only use this for unit testing! -func UpdateTestObject(cache *AssumeCache, obj interface{}) { - cache.update(nil, obj) -} - -// DeleteTestObject deletes object in the assume cache. -// Only use this for unit testing! -func DeleteTestObject(cache *AssumeCache, obj interface{}) { - cache.delete(obj) -} - -// Sentinel errors that can be checked for with errors.Is. -var ( - ErrWrongType = errors.New("object has wrong type") - ErrNotFound = errors.New("object not found") - ErrObjectName = errors.New("cannot determine object name") -) - -type WrongTypeError struct { - TypeName string - Object interface{} -} - -func (e WrongTypeError) Error() string { - return fmt.Sprintf("could not convert object to type %v: %+v", e.TypeName, e.Object) -} - -func (e WrongTypeError) Is(err error) bool { - return err == ErrWrongType -} - -type NotFoundError struct { - TypeName string - ObjectKey string -} - -func (e NotFoundError) Error() string { - return fmt.Sprintf("could not find %v %q", e.TypeName, e.ObjectKey) -} - -func (e NotFoundError) Is(err error) bool { - return err == ErrNotFound -} - -type ObjectNameError struct { - DetailedErr error -} - -func (e ObjectNameError) Error() string { - return fmt.Sprintf("failed to get object name: %v", e.DetailedErr) -} - -func (e ObjectNameError) Is(err error) bool { - return err == ErrObjectName -} - -// AssumeCache is a cache on top of the informer that allows for updating -// objects outside of informer events and also restoring the informer -// cache's version of the object. Objects are assumed to be -// Kubernetes API objects that are supported by [meta.Accessor]. -// -// Objects can referenced via their key, with [cache.MetaNamespaceKeyFunc] -// as key function. -// -// AssumeCache stores two pointers to represent a single object: -// - The pointer to the informer object. -// - The pointer to the latest object, which could be the same as -// the informer object, or an in-memory object. -// -// An informer update always overrides the latest object pointer. -// -// Assume() only updates the latest object pointer. -// Restore() sets the latest object pointer back to the informer object. -// Get/List() always returns the latest object pointer. -type AssumeCache struct { - // The logger that was chosen when setting up the cache. - // Will be used for all operations. - logger klog.Logger - - // Synchronizes updates to all fields below. - rwMutex sync.RWMutex - - // All registered event handlers. - eventHandlers []cache.ResourceEventHandler - handlerRegistration cache.ResourceEventHandlerRegistration - - // The eventQueue contains functions which deliver an event to one - // event handler. - // - // These functions must be invoked while *not locking* rwMutex because - // the event handlers are allowed to access the assume cache. 
Holding - // rwMutex then would cause a deadlock. - // - // New functions get added as part of processing a cache update while - // the rwMutex is locked. Each function which adds something to the queue - // also drains the queue before returning, therefore it is guaranteed - // that all event handlers get notified immediately (useful for unit - // testing). - // - // A channel cannot be used here because it cannot have an unbounded - // capacity. This could lead to a deadlock (writer holds rwMutex, - // gets blocked because capacity is exhausted, reader is in a handler - // which tries to lock the rwMutex). Writing into such a channel - // while not holding the rwMutex doesn't work because in-order delivery - // of events would no longer be guaranteed. - eventQueue queue.FIFO[func()] - - // describes the object stored - description string - - // Stores objInfo pointers - store cache.Indexer - - // Index function for object - indexFunc cache.IndexFunc - indexName string -} - -type objInfo struct { - // name of the object - name string - - // Latest version of object could be cached-only or from informer - latestObj interface{} - - // Latest object from informer - apiObj interface{} -} - -func objInfoKeyFunc(obj interface{}) (string, error) { - objInfo, ok := obj.(*objInfo) - if !ok { - return "", &WrongTypeError{TypeName: "objInfo", Object: obj} - } - return objInfo.name, nil -} - -func (c *AssumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) { - objInfo, ok := obj.(*objInfo) - if !ok { - return []string{""}, &WrongTypeError{TypeName: "objInfo", Object: obj} - } - return c.indexFunc(objInfo.latestObj) -} - -// NewAssumeCache creates an assume cache for general objects. -func NewAssumeCache(logger klog.Logger, informer Informer, description, indexName string, indexFunc cache.IndexFunc) *AssumeCache { - c := &AssumeCache{ - logger: logger, - description: description, - indexFunc: indexFunc, - indexName: indexName, - } - indexers := cache.Indexers{} - if indexName != "" && indexFunc != nil { - indexers[indexName] = c.objInfoIndexFunc - } - c.store = cache.NewIndexer(objInfoKeyFunc, indexers) - - // Unit tests don't use informers - if informer != nil { - // Cannot fail in practice?! No-one bothers checking the error. - c.handlerRegistration, _ = informer.AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: c.add, - UpdateFunc: c.update, - DeleteFunc: c.delete, - }, - ) - } - return c -} - -func (c *AssumeCache) add(obj interface{}) { - if obj == nil { - return - } - - name, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - c.logger.Error(&ObjectNameError{err}, "Add failed") - return - } - - defer c.emitEvents() - c.rwMutex.Lock() - defer c.rwMutex.Unlock() - - var oldObj interface{} - if objInfo, _ := c.getObjInfo(name); objInfo != nil { - newVersion, err := c.getObjVersion(name, obj) - if err != nil { - c.logger.Error(err, "Add failed: couldn't get object version") - return - } - - storedVersion, err := c.getObjVersion(name, objInfo.latestObj) - if err != nil { - c.logger.Error(err, "Add failed: couldn't get stored object version") - return - } - - // Only update object if version is newer. - // This is so we don't override assumed objects due to informer resync. 
- if newVersion <= storedVersion { - c.logger.V(10).Info("Skip adding object to assume cache because version is not newer than storedVersion", "description", c.description, "cacheKey", name, "newVersion", newVersion, "storedVersion", storedVersion) - return - } - oldObj = objInfo.latestObj - } - - objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj} - if err = c.store.Update(objInfo); err != nil { - c.logger.Info("Error occurred while updating stored object", "err", err) - } else { - c.logger.V(10).Info("Adding object to assume cache", "description", c.description, "cacheKey", name, "assumeCache", obj) - c.pushEvent(oldObj, obj) - } -} - -func (c *AssumeCache) update(oldObj interface{}, newObj interface{}) { - c.add(newObj) -} - -func (c *AssumeCache) delete(obj interface{}) { - if obj == nil { - return - } - - name, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - c.logger.Error(&ObjectNameError{err}, "Failed to delete") - return - } - - defer c.emitEvents() - c.rwMutex.Lock() - defer c.rwMutex.Unlock() - - var oldObj interface{} - if len(c.eventHandlers) > 0 { - if objInfo, _ := c.getObjInfo(name); objInfo != nil { - oldObj = objInfo.latestObj - } - } - - objInfo := &objInfo{name: name} - err = c.store.Delete(objInfo) - if err != nil { - c.logger.Error(err, "Failed to delete", "description", c.description, "cacheKey", name) - } - - c.pushEvent(oldObj, nil) -} - -// pushEvent gets called while the mutex is locked for writing. -// It ensures that all currently registered event handlers get -// notified about a change when the caller starts delivering -// those with emitEvents. -// -// For a delete event, newObj is nil. For an add, oldObj is nil. -// An update has both as non-nil. -func (c *AssumeCache) pushEvent(oldObj, newObj interface{}) { - for _, handler := range c.eventHandlers { - handler := handler - if oldObj == nil { - c.eventQueue.Push(func() { - handler.OnAdd(newObj, false) - }) - } else if newObj == nil { - c.eventQueue.Push(func() { - handler.OnDelete(oldObj) - }) - } else { - c.eventQueue.Push(func() { - handler.OnUpdate(oldObj, newObj) - }) - } - } -} - -func (c *AssumeCache) getObjVersion(name string, obj interface{}) (int64, error) { - objAccessor, err := meta.Accessor(obj) - if err != nil { - return -1, err - } - - objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64) - if err != nil { - //nolint:errorlint // Intentionally not wrapping the error, the underlying error is an implementation detail. - return -1, fmt.Errorf("error parsing ResourceVersion %q for %v %q: %v", objAccessor.GetResourceVersion(), c.description, name, err) - } - return objResourceVersion, nil -} - -func (c *AssumeCache) getObjInfo(key string) (*objInfo, error) { - obj, ok, err := c.store.GetByKey(key) - if err != nil { - return nil, err - } - if !ok { - return nil, &NotFoundError{TypeName: c.description, ObjectKey: key} - } - - objInfo, ok := obj.(*objInfo) - if !ok { - return nil, &WrongTypeError{"objInfo", obj} - } - return objInfo, nil -} - -// Get the object by its key. -func (c *AssumeCache) Get(key string) (interface{}, error) { - c.rwMutex.RLock() - defer c.rwMutex.RUnlock() - - objInfo, err := c.getObjInfo(key) - if err != nil { - return nil, err - } - return objInfo.latestObj, nil -} - -// GetAPIObj gets the informer cache's version by its key. 
-func (c *AssumeCache) GetAPIObj(key string) (interface{}, error) { - c.rwMutex.RLock() - defer c.rwMutex.RUnlock() - - objInfo, err := c.getObjInfo(key) - if err != nil { - return nil, err - } - return objInfo.apiObj, nil -} - -// List all the objects in the cache. -func (c *AssumeCache) List(indexObj interface{}) []interface{} { - c.rwMutex.RLock() - defer c.rwMutex.RUnlock() - - return c.listLocked(indexObj) -} - -func (c *AssumeCache) listLocked(indexObj interface{}) []interface{} { - allObjs := []interface{}{} - var objs []interface{} - if c.indexName != "" { - o, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj}) - if err != nil { - c.logger.Error(err, "List index error") - return nil - } - objs = o - } else { - objs = c.store.List() - } - - for _, obj := range objs { - objInfo, ok := obj.(*objInfo) - if !ok { - c.logger.Error(&WrongTypeError{TypeName: "objInfo", Object: obj}, "List error") - continue - } - allObjs = append(allObjs, objInfo.latestObj) - } - return allObjs -} - -// Assume updates the object in-memory only. -// -// The version of the object must be greater or equal to -// the current object, otherwise an error is returned. -// -// Storing an object with the same version is supported -// by the assume cache, but suffers from a race: if an -// update is received via the informer while such an -// object is assumed, it gets dropped in favor of the -// newer object from the apiserver. -// -// Only assuming objects that were returned by an apiserver -// operation (Update, Patch) is safe. -func (c *AssumeCache) Assume(obj interface{}) error { - name, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - return &ObjectNameError{err} - } - - defer c.emitEvents() - c.rwMutex.Lock() - defer c.rwMutex.Unlock() - - objInfo, err := c.getObjInfo(name) - if err != nil { - return err - } - - newVersion, err := c.getObjVersion(name, obj) - if err != nil { - return err - } - - storedVersion, err := c.getObjVersion(name, objInfo.latestObj) - if err != nil { - return err - } - - if newVersion < storedVersion { - return fmt.Errorf("%v %q is out of sync (stored: %d, assume: %d)", c.description, name, storedVersion, newVersion) - } - - c.pushEvent(objInfo.latestObj, obj) - - // Only update the cached object - objInfo.latestObj = obj - c.logger.V(4).Info("Assumed object", "description", c.description, "cacheKey", name, "version", newVersion) - return nil -} - -// Restore the informer cache's version of the object. -func (c *AssumeCache) Restore(objName string) { - defer c.emitEvents() - c.rwMutex.Lock() - defer c.rwMutex.Unlock() - - objInfo, err := c.getObjInfo(objName) - if err != nil { - // This could be expected if object got deleted - c.logger.V(5).Info("Restore object", "description", c.description, "cacheKey", objName, "err", err) - } else { - if objInfo.latestObj != objInfo.apiObj { - c.pushEvent(objInfo.latestObj, objInfo.apiObj) - objInfo.latestObj = objInfo.apiObj - } - c.logger.V(4).Info("Restored object", "description", c.description, "cacheKey", objName) - } -} - -// AddEventHandler adds an event handler to the cache. Events to a -// single handler are delivered sequentially, but there is no -// coordination between different handlers. A handler may use the -// cache. -// -// The return value can be used to wait for cache synchronization. 
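As a usage illustration of the assume/restore contract documented above, here is a hedged sketch. It assumes a cache of cluster-scoped PersistentVolumes (so the key is just the object name); the package name, function name, and annotation key are illustrative, not part of the vendored code:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
)

// annotateAndAssume follows the documented contract: only objects returned by
// an apiserver operation (Update, Patch) are assumed.
func annotateAndAssume(ctx context.Context, client kubernetes.Interface, ac *assumecache.AssumeCache, key string) error {
	obj, err := ac.Get(key) // latest pointer: informer object or a prior assumption
	if err != nil {
		return err
	}
	pv := obj.(*v1.PersistentVolume).DeepCopy()
	if pv.Annotations == nil {
		pv.Annotations = map[string]string{}
	}
	pv.Annotations["example.io/state"] = "assumed"

	updated, err := client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})
	if err != nil {
		return err // nothing was assumed; the cache still serves the informer object
	}
	if err := ac.Assume(updated); err != nil {
		return err // e.g. the informer already delivered a newer ResourceVersion
	}
	// If a later step fails, roll the in-memory state back to the informer's view:
	// ac.Restore(key)
	return nil
}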
-func (c *AssumeCache) AddEventHandler(handler cache.ResourceEventHandler) cache.ResourceEventHandlerRegistration { - defer c.emitEvents() - c.rwMutex.Lock() - defer c.rwMutex.Unlock() - - c.eventHandlers = append(c.eventHandlers, handler) - allObjs := c.listLocked(nil) - for _, obj := range allObjs { - c.eventQueue.Push(func() { - handler.OnAdd(obj, true) - }) - } - - if c.handlerRegistration == nil { - // No informer, so immediately synced. - return syncedHandlerRegistration{} - } - - return c.handlerRegistration -} - -// emitEvents delivers all pending events that are in the queue, in the order -// in which they were stored there (FIFO). -func (c *AssumeCache) emitEvents() { - for { - c.rwMutex.Lock() - deliver, ok := c.eventQueue.Pop() - c.rwMutex.Unlock() - - if !ok { - return - } - func() { - defer utilruntime.HandleCrash() - deliver() - }() - } -} - -// syncedHandlerRegistration is an implementation of ResourceEventHandlerRegistration -// which always returns true. -type syncedHandlerRegistration struct{} - -func (syncedHandlerRegistration) HasSynced() bool { return true } diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/pod_resources.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/pod_resources.go deleted file mode 100644 index e8456856069..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/pod_resources.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -// For each of these resources, a container that doesn't request the resource explicitly -// will be treated as having requested the amount indicated below, for the purpose -// of computing priority only. This ensures that when scheduling zero-request pods, such -// pods will not all be scheduled to the node with the smallest in-use request, -// and that when scheduling regular pods, such pods will not see zero-request pods as -// consuming no resources whatsoever. We chose these values to be similar to the -// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary. -// As described in #11713, we use request instead of limit to deal with resource requirements. -const ( - // DefaultMilliCPURequest defines default milli cpu request number. - DefaultMilliCPURequest int64 = 100 // 0.1 core - // DefaultMemoryRequest defines default memory request size. - DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB -) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/queue/fifo.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/queue/fifo.go deleted file mode 100644 index ee66733fe43..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/queue/fifo.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package queue - -const ( - // normalSize limits the size of the buffer that is kept - // for reuse. - normalSize = 4 -) - -// FIFO implements a first-in-first-out queue with unbounded size. -// The null FIFO is a valid empty queue. -// -// Access must be protected by the caller when used concurrently by -// different goroutines, the queue itself implements no locking. -type FIFO[T any] struct { - // elements contains a buffer for elements which have been - // pushed and not popped yet. Two scenarios are possible: - // - one chunk in the middle (start <= end) - // - one chunk at the end, followed by one chunk at the - // beginning (end <= start) - // - // start == end can be either an empty queue or a completely - // full one (with two chunks). - elements []T - - // len counts the number of elements which have been pushed and - // not popped yet. - len int - - // start is the index of the first valid element. - start int - - // end is the index after the last valid element. - end int -} - -func (q *FIFO[T]) Len() int { - return q.len -} - -func (q *FIFO[T]) Push(element T) { - size := len(q.elements) - if q.len == size { - // Need larger buffer. - newSize := size * 2 - if newSize == 0 { - newSize = normalSize - } - elements := make([]T, newSize) - if q.start == 0 { - copy(elements, q.elements) - } else { - copy(elements, q.elements[q.start:]) - copy(elements[len(q.elements)-q.start:], q.elements[0:q.end]) - } - q.start = 0 - q.end = q.len - q.elements = elements - size = newSize - } - if q.end == size { - // Wrap around. - q.elements[0] = element - q.end = 1 - q.len++ - return - } - q.elements[q.end] = element - q.end++ - q.len++ -} - -func (q *FIFO[T]) Pop() (element T, ok bool) { - if q.len == 0 { - return - } - element = q.elements[q.start] - q.start++ - if q.start == len(q.elements) { - // Wrap around. - q.start = 0 - } - q.len-- - - // Once it is empty, shrink down to avoid hanging onto - // a large buffer forever. - if q.len == 0 && len(q.elements) > normalSize { - q.elements = make([]T, normalSize) - q.start = 0 - q.end = 0 - } - - ok = true - return -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go deleted file mode 100644 index e14b4e77453..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go +++ /dev/null @@ -1,190 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
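Because the FIFO above is generic and its zero value is a valid empty queue, a minimal example may help; this sketch assumes the vendored import path being removed here, and note that the queue does no locking of its own (the caller must synchronize concurrent access):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/util/queue"
)

func main() {
	var q queue.FIFO[string] // zero value is a usable empty queue

	q.Push("a")
	q.Push("b")
	q.Push("c") // internal ring buffer grows (and wraps) as needed

	for {
		v, ok := q.Pop()
		if !ok {
			break // queue drained
		}
		fmt.Println(v) // prints a, b, c in insertion order
	}
}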
-*/
-
-package util
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"time"
-
-	v1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/net"
-	"k8s.io/apimachinery/pkg/util/strategicpatch"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/util/retry"
-	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
-	"k8s.io/klog/v2"
-	extenderv1 "k8s.io/kube-scheduler/extender/v1"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-)
-
-// GetPodFullName returns a name that uniquely identifies a pod.
-func GetPodFullName(pod *v1.Pod) string {
-	// Use underscore as the delimiter because it is not allowed in pod name
-	// (DNS subdomain format).
-	return pod.Name + "_" + pod.Namespace
-}
-
-// GetPodStartTime returns start time of the given pod or current timestamp
-// if it hasn't started yet.
-func GetPodStartTime(pod *v1.Pod) *metav1.Time {
-	if pod.Status.StartTime != nil {
-		return pod.Status.StartTime
-	}
-	// Assumed pods and bound pods that haven't started don't have a StartTime yet.
-	return &metav1.Time{Time: time.Now()}
-}
-
-// GetEarliestPodStartTime returns the earliest start time of all pods that
-// have the highest priority among all victims.
-func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time {
-	if len(victims.Pods) == 0 {
-		// should not reach here.
-		klog.Background().Error(nil, "victims.Pods is empty. Should not reach here")
-		return nil
-	}
-
-	earliestPodStartTime := GetPodStartTime(victims.Pods[0])
-	maxPriority := corev1helpers.PodPriority(victims.Pods[0])
-
-	for _, pod := range victims.Pods {
-		if podPriority := corev1helpers.PodPriority(pod); podPriority == maxPriority {
-			if podStartTime := GetPodStartTime(pod); podStartTime.Before(earliestPodStartTime) {
-				earliestPodStartTime = podStartTime
-			}
-		} else if podPriority > maxPriority {
-			maxPriority = podPriority
-			earliestPodStartTime = GetPodStartTime(pod)
-		}
-	}
-
-	return earliestPodStartTime
-}
-
-// MoreImportantPod returns true when the priority of the first pod is higher than
-// the second one. If the two pods' priorities are equal, compare their StartTime.
-// It takes arguments of the type "interface{}" to be used with SortableList,
-// but expects those arguments to be *v1.Pod.
-func MoreImportantPod(pod1, pod2 *v1.Pod) bool {
-	p1 := corev1helpers.PodPriority(pod1)
-	p2 := corev1helpers.PodPriority(pod2)
-	if p1 != p2 {
-		return p1 > p2
-	}
-	return GetPodStartTime(pod1).Before(GetPodStartTime(pod2))
-}
-
-// Retriable defines the retriable errors during a scheduling cycle.
-func Retriable(err error) bool {
-	return apierrors.IsInternalError(err) || apierrors.IsServiceUnavailable(err) ||
-		net.IsConnectionRefused(err)
-}
-
-// PatchPodStatus calculates the delta bytes change from <old.Status> to <newStatus>,
-// and then submits a request to the API server to patch the pod changes.
-func PatchPodStatus(ctx context.Context, cs kubernetes.Interface, old *v1.Pod, newStatus *v1.PodStatus) error {
-	if newStatus == nil {
-		return nil
-	}
-
-	oldData, err := json.Marshal(v1.Pod{Status: old.Status})
-	if err != nil {
-		return err
-	}
-
-	newData, err := json.Marshal(v1.Pod{Status: *newStatus})
-	if err != nil {
-		return err
-	}
-	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{})
-	if err != nil {
-		return fmt.Errorf("failed to create merge patch for pod %q/%q: %v", old.Namespace, old.Name, err)
-	}
-
-	if "{}" == string(patchBytes) {
-		return nil
-	}
-
-	patchFn := func() error {
-		_, err := cs.CoreV1().Pods(old.Namespace).Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
-		return err
-	}
-
-	return retry.OnError(retry.DefaultBackoff, Retriable, patchFn)
-}
-
-// DeletePod deletes the given <pod> from the API server.
-func DeletePod(ctx context.Context, cs kubernetes.Interface, pod *v1.Pod) error {
-	return cs.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
-}
-
-// ClearNominatedNodeName internally submits a patch request to the API server
-// to set each <pods[*].Status.NominatedNodeName> to "".
-func ClearNominatedNodeName(ctx context.Context, cs kubernetes.Interface, pods ...*v1.Pod) utilerrors.Aggregate {
-	var errs []error
-	for _, p := range pods {
-		if len(p.Status.NominatedNodeName) == 0 {
-			continue
-		}
-		podStatusCopy := p.Status.DeepCopy()
-		podStatusCopy.NominatedNodeName = ""
-		if err := PatchPodStatus(ctx, cs, p, podStatusCopy); err != nil {
-			errs = append(errs, err)
-		}
-	}
-	return utilerrors.NewAggregate(errs)
-}
-
-// IsScalarResourceName validates the resource for Extended, Hugepages, Native and AttachableVolume resources
-func IsScalarResourceName(name v1.ResourceName) bool {
-	return v1helper.IsExtendedResourceName(name) || v1helper.IsHugePageResourceName(name) ||
-		v1helper.IsPrefixedNativeResource(name) || v1helper.IsAttachableVolumeResourceName(name)
-}
-
-// As converts two objects to the given type.
-// Both objects must be of the same type. If not, an error is returned.
-// nil objects are allowed and will be converted to nil.
-// For oldObj, cache.DeletedFinalStateUnknown is handled and the
-// object stored in it will be converted instead.
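The As helper documented above (its implementation follows) is easiest to see inside an informer event handler, where old/new objects arrive untyped and a missed delete arrives as a tombstone. A small illustrative sketch; the handler and object names are hypothetical:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/scheduler/util"
)

// onPodEvent converts the untyped old/new pair from an informer event,
// including the cache.DeletedFinalStateUnknown case for the old object.
func onPodEvent(oldObj, newObj interface{}) {
	oldPod, newPod, err := util.As[*v1.Pod](oldObj, newObj)
	if err != nil {
		fmt.Println("unexpected object type:", err)
		return
	}
	fmt.Printf("pod %s: old present=%v new present=%v\n", newPod.Name, oldPod != nil, newPod != nil)
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	// A tombstone wraps the last known state after a missed delete event.
	tombstone := cache.DeletedFinalStateUnknown{Key: "default/demo", Obj: pod}
	onPodEvent(tombstone, pod) // As unwraps the tombstone before converting
}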
-func As[T any](oldObj, newobj interface{}) (T, T, error) { - var oldTyped T - var newTyped T - var ok bool - if newobj != nil { - newTyped, ok = newobj.(T) - if !ok { - return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", newTyped, newobj) - } - } - - if oldObj != nil { - if realOldObj, ok := oldObj.(cache.DeletedFinalStateUnknown); ok { - oldObj = realOldObj.Obj - } - oldTyped, ok = oldObj.(T) - if !ok { - return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", oldTyped, oldObj) - } - } - return oldTyped, newTyped, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/OWNERS deleted file mode 100644 index 123e203f90e..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - sig-node-approvers - - tallclair -reviewers: - - sig-node-reviewers - - tallclair -labels: - - sig/node diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go b/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go deleted file mode 100644 index eeaa3955dd3..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apparmor - -import ( - "strings" - - v1 "k8s.io/api/core/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" -) - -// Checks whether app armor is required for the pod to run. AppArmor is considered required if any -// non-unconfined profiles are specified. -func isRequired(pod *v1.Pod) bool { - if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.AppArmorProfile != nil && - pod.Spec.SecurityContext.AppArmorProfile.Type != v1.AppArmorProfileTypeUnconfined { - return true - } - - inUse := !podutil.VisitContainers(&pod.Spec, podutil.AllContainers, func(c *v1.Container, _ podutil.ContainerType) bool { - if c.SecurityContext != nil && c.SecurityContext.AppArmorProfile != nil && - c.SecurityContext.AppArmorProfile.Type != v1.AppArmorProfileTypeUnconfined { - return false // is in use; short-circuit - } - return true - }) - if inUse { - return true - } - - for key, value := range pod.Annotations { - if strings.HasPrefix(key, v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix) { - return value != v1.DeprecatedAppArmorBetaProfileNameUnconfined - } - } - return false -} - -// GetProfileName returns the name of the profile to use with the container. -func GetProfile(pod *v1.Pod, container *v1.Container) *v1.AppArmorProfile { - if container.SecurityContext != nil && container.SecurityContext.AppArmorProfile != nil { - return container.SecurityContext.AppArmorProfile - } - - // Static pods may not have had annotations synced to fields, so fallback to annotations before - // the pod profile. 
- if profile := getProfileFromPodAnnotations(pod.Annotations, container.Name); profile != nil { - return profile - } - - if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.AppArmorProfile != nil { - return pod.Spec.SecurityContext.AppArmorProfile - } - - return nil -} - -// getProfileFromPodAnnotations gets the AppArmor profile to use with container from -// (deprecated) pod annotations. -func getProfileFromPodAnnotations(annotations map[string]string, containerName string) *v1.AppArmorProfile { - val, ok := annotations[v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix+containerName] - if !ok { - return nil - } - - switch { - case val == v1.DeprecatedAppArmorBetaProfileRuntimeDefault: - return &v1.AppArmorProfile{Type: v1.AppArmorProfileTypeRuntimeDefault} - - case val == v1.DeprecatedAppArmorBetaProfileNameUnconfined: - return &v1.AppArmorProfile{Type: v1.AppArmorProfileTypeUnconfined} - - case strings.HasPrefix(val, v1.DeprecatedAppArmorBetaProfileNamePrefix): - // Note: an invalid empty localhost profile will be rejected by kubelet admission. - profileName := strings.TrimPrefix(val, v1.DeprecatedAppArmorBetaProfileNamePrefix) - return &v1.AppArmorProfile{ - Type: v1.AppArmorProfileTypeLocalhost, - LocalhostProfile: &profileName, - } - - default: - // Invalid annotation. - return nil - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go deleted file mode 100644 index 34113df71b0..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apparmor - -import ( - "errors" - "fmt" - "strings" - - v1 "k8s.io/api/core/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/third_party/forked/libcontainer/apparmor" -) - -// Whether AppArmor should be disabled by default. -// Set to true if the wrong build tags are set (see validate_disabled.go). -var isDisabledBuild bool - -// Validator is a interface for validating that a pod with an AppArmor profile can be run by a Node. 
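Before the Validator interface below, a short sketch of the resolution order that GetProfile above implements: container-level field first, then the deprecated beta annotation, then the pod-level field. The pod literal is illustrative only; the v1 constants are the same deprecated annotation keys used in the deleted code:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/security/apparmor"
)

func main() {
	runtimeDefault := v1.AppArmorProfile{Type: v1.AppArmorProfileTypeRuntimeDefault}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			// Deprecated annotation: consulted after the container field but
			// before the pod field (static pods may only carry the annotation).
			Annotations: map[string]string{
				v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix + "app": v1.DeprecatedAppArmorBetaProfileNameUnconfined,
			},
		},
		Spec: v1.PodSpec{
			SecurityContext: &v1.PodSecurityContext{AppArmorProfile: &runtimeDefault},
			Containers:      []v1.Container{{Name: "app"}},
		},
	}

	// No container-level field is set, so the annotation wins over the pod field.
	profile := apparmor.GetProfile(pod, &pod.Spec.Containers[0])
	fmt.Println(profile.Type) // Unconfined
}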
-type Validator interface { - Validate(pod *v1.Pod) error - ValidateHost() error -} - -// NewValidator is in order to find AppArmor FS -func NewValidator() Validator { - if err := validateHost(); err != nil { - return &validator{validateHostErr: err} - } - return &validator{} -} - -type validator struct { - validateHostErr error -} - -func (v *validator) Validate(pod *v1.Pod) error { - if !isRequired(pod) { - return nil - } - - if v.ValidateHost() != nil { - return v.validateHostErr - } - - var retErr error - podutil.VisitContainers(&pod.Spec, podutil.AllContainers, func(container *v1.Container, containerType podutil.ContainerType) bool { - profile := GetProfile(pod, container) - if profile == nil { - return true - } - - // TODO(#64841): This would ideally be part of validation.ValidateAppArmorProfileFormat, but - // that is called for API validation, and this is tightening validation. - if profile.Type == v1.AppArmorProfileTypeLocalhost { - if profile.LocalhostProfile == nil || strings.TrimSpace(*profile.LocalhostProfile) == "" { - retErr = fmt.Errorf("invalid empty AppArmor profile name: %q", profile) - return false - } - } - return true - }) - - return retErr -} - -// ValidateHost verifies that the host and runtime is capable of enforcing AppArmor profiles. -// Note, this is intentionally only check the host at kubelet startup and never re-evaluates the host -// as the expectation is that the kubelet restart will be needed to enable or disable AppArmor support. -func (v *validator) ValidateHost() error { - return v.validateHostErr -} - -// validateHost verifies that the host and runtime is capable of enforcing AppArmor profiles. -func validateHost() error { - // Check build support. - if isDisabledBuild { - return errors.New("binary not compiled for linux") - } - - // Check kernel support. - if !apparmor.IsEnabled() { - return errors.New("AppArmor is not enabled on the host") - } - - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go b/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go deleted file mode 100644 index 91a82c2ef82..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !linux -// +build !linux - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apparmor - -func init() { - // If Kubernetes was not built for linux, apparmor is always disabled. 
- isDisabledBuild = true -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/probe/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/OWNERS similarity index 100% rename from e2e/vendor/k8s.io/kubernetes/pkg/probe/OWNERS rename to e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/OWNERS diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go index ef1cd13a8e0..9eb7d581b2f 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go @@ -35,6 +35,7 @@ type PodSecurityContextAccessor interface { SeccompProfile() *api.SeccompProfile SupplementalGroups() []int64 FSGroup() *int64 + FSGroupChangePolicy() *api.PodFSGroupChangePolicy } // PodSecurityContextMutator allows reading and writing the values of a PodSecurityContext object @@ -52,6 +53,7 @@ type PodSecurityContextMutator interface { SetSeccompProfile(*api.SeccompProfile) SetSupplementalGroups([]int64) SetFSGroup(*int64) + SetFSGroupChangePolicy(*api.PodFSGroupChangePolicy) // PodSecurityContext returns the current PodSecurityContext object PodSecurityContext() *api.PodSecurityContext @@ -231,6 +233,23 @@ func (w *podSecurityContextWrapper) SetFSGroup(v *int64) { w.podSC.FSGroup = v } +func (w *podSecurityContextWrapper) FSGroupChangePolicy() *api.PodFSGroupChangePolicy { + if w.podSC == nil { + return nil + } + + return w.podSC.FSGroupChangePolicy +} + +func (w *podSecurityContextWrapper) SetFSGroupChangePolicy(v *api.PodFSGroupChangePolicy) { + if w.podSC == nil && v == nil { + return + } + + w.ensurePodSC() + w.podSC.FSGroupChangePolicy = v +} + // ContainerSecurityContextAccessor allows reading the values of a SecurityContext object type ContainerSecurityContextAccessor interface { Capabilities() *api.Capabilities diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go index 28771b6df27..5e000f93333 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go @@ -17,6 +17,10 @@ limitations under the License. package securitycontext import ( + "fmt" + "os" + "sync" + v1 "k8s.io/api/core/v1" ) @@ -188,21 +192,32 @@ func AddNoNewPrivileges(sc *v1.SecurityContext) bool { var ( // These *must* be kept in sync with moby/moby. - // https://github.com/moby/moby/blob/master/oci/defaults.go#L105-L124 - // @jessfraz will watch changes to those files upstream. 
- defaultMaskedPaths = []string{ - "/proc/asound", - "/proc/acpi", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - "/sys/devices/virtual/powercap", - } + // https://github.com/moby/moby/blob/ecb03c4cdae6f323150fc11b303dcc5dc4d82416/oci/defaults.go#L190-L218 + defaultMaskedPaths = sync.OnceValue(func() []string { + maskedPaths := []string{ + "/proc/asound", + "/proc/acpi", + "/proc/interrupts", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + "/sys/devices/virtual/powercap", + } + + for _, cpu := range possibleCPUs() { + path := fmt.Sprintf("/sys/devices/system/cpu/cpu%d/thermal_throttle", cpu) + if _, err := os.Stat(path); err == nil { + maskedPaths = append(maskedPaths, path) + } + } + + return maskedPaths + }) defaultReadonlyPaths = []string{ "/proc/bus", "/proc/fs", @@ -221,7 +236,7 @@ func ConvertToRuntimeMaskedPaths(opt *v1.ProcMountType) []string { } // Otherwise, add the default masked paths to the runtime security context. - return defaultMaskedPaths + return defaultMaskedPaths() } // ConvertToRuntimeReadonlyPaths converts the ProcMountType to the specified or default diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_darwin.go similarity index 83% rename from e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/doc.go rename to e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_darwin.go index 0f119d2f568..9d14502acb7 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/kubelet/util/doc.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_darwin.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2025 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package util holds utility functions. -package util +package securitycontext + +func possibleCPUs() []int { + return nil +} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_linux.go new file mode 100644 index 00000000000..278c81ee2c1 --- /dev/null +++ b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_linux.go @@ -0,0 +1,74 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package securitycontext + +import ( + "os" + "runtime" + "strconv" + "strings" + "sync" +) + +// possibleCPUs returns the number of possible CPUs on this host. 
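The switch above from a package-level slice to sync.OnceValue means defaultMaskedPaths is now computed lazily and exactly once, which matters because the new code probes per-CPU paths with os.Stat and should not do that at package init. A tiny standalone sketch of the OnceValue semantics (the names here are illustrative):

package main

import (
	"fmt"
	"sync"
)

// expensiveList mirrors the defaultMaskedPaths pattern: the slice is built
// on first call and the result is cached for all later calls.
var expensiveList = sync.OnceValue(func() []string {
	fmt.Println("computed") // printed exactly once
	return []string{"/proc/acpi", "/proc/kcore"}
})

func main() {
	fmt.Println(expensiveList()) // triggers the computation
	fmt.Println(expensiveList()) // returns the cached slice
}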
+func possibleCPUs() (cpus []int) { + if ncpu := possibleCPUsParsed(); ncpu != nil { + return ncpu + } + + for i := range runtime.NumCPU() { + cpus = append(cpus, i) + } + + return cpus +} + +// possibleCPUsParsed is parsing the amount of possible CPUs on this host from +// /sys/devices. +var possibleCPUsParsed = sync.OnceValue(func() (cpus []int) { + data, err := os.ReadFile("/sys/devices/system/cpu/possible") + if err != nil { + return nil + } + + ranges := strings.SplitSeq(strings.TrimSpace(string(data)), ",") + + for r := range ranges { + if rStart, rEnd, ok := strings.Cut(r, "-"); !ok { + cpu, err := strconv.Atoi(rStart) + if err != nil { + return nil + } + cpus = append(cpus, cpu) + } else { + var start, end int + start, err := strconv.Atoi(rStart) + if err != nil { + return nil + } + end, err = strconv.Atoi(rEnd) + if err != nil { + return nil + } + for i := start; i <= end; i++ { + cpus = append(cpus, i) + } + } + } + + return cpus +}) diff --git a/e2e/vendor/k8s.io/apiserver/pkg/storage/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_windows.go similarity index 83% rename from e2e/vendor/k8s.io/apiserver/pkg/storage/doc.go rename to e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_windows.go index d2c5dbfc456..9d14502acb7 100644 --- a/e2e/vendor/k8s.io/apiserver/pkg/storage/doc.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/securitycontext/util_windows.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2025 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Interfaces for database-related operations. -package storage +package securitycontext + +func possibleCPUs() []int { + return nil +} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go deleted file mode 100644 index 0dfd11fb435..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
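The /sys/devices/system/cpu/possible file read by possibleCPUsParsed above uses the kernel's CPU list format, e.g. "0-3,5". A self-contained re-implementation of just that grammar, for illustration (parseCPUList is a hypothetical name, not part of this patch):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCPUList expands the kernel list format, e.g. "0-3,5" -> [0 1 2 3 5].
func parseCPUList(s string) ([]int, error) {
	var cpus []int
	for _, r := range strings.Split(strings.TrimSpace(s), ",") {
		first, last, isRange := strings.Cut(r, "-")
		start, err := strconv.Atoi(first)
		if err != nil {
			return nil, err
		}
		end := start
		if isRange {
			if end, err = strconv.Atoi(last); err != nil {
				return nil, err
			}
		}
		for i := start; i <= end; i++ {
			cpus = append(cpus, i)
		}
	}
	return cpus, nil
}

func main() {
	cpus, err := parseCPUList("0-3,5")
	fmt.Println(cpus, err) // [0 1 2 3 5] <nil>
}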
-*/ - -package filesystem - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -// DefaultFs implements Filesystem using same-named functions from "os" and "io" -type DefaultFs struct { - root string -} - -var _ Filesystem = &DefaultFs{} - -// NewTempFs returns a fake Filesystem in temporary directory, useful for unit tests -func NewTempFs() Filesystem { - path, _ := os.MkdirTemp("", "tmpfs") - return &DefaultFs{ - root: path, - } -} - -func (fs *DefaultFs) prefix(path string) string { - if len(fs.root) == 0 { - return path - } - return filepath.Join(fs.root, path) -} - -// Stat via os.Stat -func (fs *DefaultFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(fs.prefix(name)) -} - -// Create via os.Create -func (fs *DefaultFs) Create(name string) (File, error) { - file, err := os.Create(fs.prefix(name)) - if err != nil { - return nil, err - } - return &defaultFile{file}, nil -} - -// Rename via os.Rename -func (fs *DefaultFs) Rename(oldpath, newpath string) error { - if !strings.HasPrefix(oldpath, fs.root) { - oldpath = fs.prefix(oldpath) - } - if !strings.HasPrefix(newpath, fs.root) { - newpath = fs.prefix(newpath) - } - return os.Rename(oldpath, newpath) -} - -func (fs *DefaultFs) MkdirAll(path string, perm os.FileMode) error { - return MkdirAll(fs.prefix(path), perm) -} - -// MkdirAllWithPathCheck checks if path exists already. If not, it creates a directory -// named path, along with any necessary parents, and returns nil, or else returns an error. -// Permission bits perm (before umask) are used for all directories that -// MkdirAllWithPathCheck creates. -// If path is already a directory, MkdirAllWithPathCheck does nothing and returns nil. -// NOTE: In case of Windows NTFS, mount points are implemented as reparse-point -// (similar to symlink) and do not represent actual directory. Hence Directory existence -// check for windows NTFS will NOT check for dir, but for symlink presence. -func MkdirAllWithPathCheck(path string, perm os.FileMode) error { - if dir, err := os.Lstat(path); err == nil { - // If the path exists already, - // 1. for Unix/Linux OS, check if the path is directory. - // 2. for windows NTFS, check if the path is symlink instead of directory. - if dir.IsDir() || - (runtime.GOOS == "windows" && (dir.Mode()&os.ModeSymlink != 0 || dir.Mode()&os.ModeIrregular != 0)) { - return nil - } - return fmt.Errorf("path %v exists but is not a directory", path) - } - // If existence of path not known, attempt to create it. 
- if err := MkdirAll(path, perm); err != nil { - return err - } - return nil -} - -// Chtimes via os.Chtimes -func (fs *DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(fs.prefix(name), atime, mtime) -} - -// RemoveAll via os.RemoveAll -func (fs *DefaultFs) RemoveAll(path string) error { - return os.RemoveAll(fs.prefix(path)) -} - -// Remove via os.Remove -func (fs *DefaultFs) Remove(name string) error { - return os.Remove(fs.prefix(name)) -} - -// ReadFile via os.ReadFile -func (fs *DefaultFs) ReadFile(filename string) ([]byte, error) { - return os.ReadFile(fs.prefix(filename)) -} - -// TempDir via os.MkdirTemp -func (fs *DefaultFs) TempDir(dir, prefix string) (string, error) { - return os.MkdirTemp(fs.prefix(dir), prefix) -} - -// TempFile via os.CreateTemp -func (fs *DefaultFs) TempFile(dir, prefix string) (File, error) { - file, err := os.CreateTemp(fs.prefix(dir), prefix) - if err != nil { - return nil, err - } - return &defaultFile{file}, nil -} - -// ReadDir via os.ReadDir -func (fs *DefaultFs) ReadDir(dirname string) ([]os.DirEntry, error) { - return os.ReadDir(fs.prefix(dirname)) -} - -// Walk via filepath.Walk -func (fs *DefaultFs) Walk(root string, walkFn filepath.WalkFunc) error { - return filepath.Walk(fs.prefix(root), walkFn) -} - -// defaultFile implements File using same-named functions from "os" -type defaultFile struct { - file *os.File -} - -// Name via os.File.Name -func (file *defaultFile) Name() string { - return file.file.Name() -} - -// Write via os.File.Write -func (file *defaultFile) Write(b []byte) (n int, err error) { - return file.file.Write(b) -} - -// Sync via os.File.Sync -func (file *defaultFile) Sync() error { - return file.file.Sync() -} - -// Close via os.File.Close -func (file *defaultFile) Close() error { - return file.file.Close() -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go deleted file mode 100644 index 6408e0fa838..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package filesystem - -import ( - "os" - "path/filepath" - "time" -) - -// Filesystem is an interface that we can use to mock various filesystem operations -type Filesystem interface { - // from "os" - Stat(name string) (os.FileInfo, error) - Create(name string) (File, error) - Rename(oldpath, newpath string) error - MkdirAll(path string, perm os.FileMode) error - Chtimes(name string, atime time.Time, mtime time.Time) error - RemoveAll(path string) error - Remove(name string) error - - // from "os" - ReadFile(filename string) ([]byte, error) - TempDir(dir, prefix string) (string, error) - TempFile(dir, prefix string) (File, error) - ReadDir(dirname string) ([]os.DirEntry, error) - Walk(root string, walkFn filepath.WalkFunc) error -} - -// File is an interface that we can use to mock various filesystem operations typically -// accessed through the File object from the "os" package -type File interface { - // for now, the only os.File methods used are those below, add more as necessary - Name() string - Write(b []byte) (n int, err error) - Sync() error - Close() error -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util.go deleted file mode 100644 index d64fc37c71b..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package filesystem - -import ( - "path/filepath" -) - -// IsPathClean will replace slashes to Separator (which is OS-specific). -// This will make sure that all slashes are the same before comparing. -func IsPathClean(path string) bool { - return filepath.ToSlash(filepath.Clean(path)) == filepath.ToSlash(path) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go deleted file mode 100644 index d27d2640f33..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build freebsd || linux || darwin -// +build freebsd linux darwin - -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
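The point of the Filesystem interface removed above was testability: code written against it runs on &DefaultFs{} in production and on the temp-dir-backed fake in tests. A hedged sketch against the package as it existed before this removal (writeMarker is an illustrative name):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/filesystem"
)

// writeMarker depends only on the Filesystem interface, so a test can inject
// the sandboxed fake while production code passes the real implementation.
func writeMarker(fs filesystem.Filesystem, name string) error {
	f, err := fs.Create(name)
	if err != nil {
		return err
	}
	if _, err := f.Write([]byte("ready")); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}

func main() {
	fs := filesystem.NewTempFs() // all paths are prefixed with an os.MkdirTemp root
	if err := writeMarker(fs, "marker"); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	data, _ := fs.ReadFile("marker")
	fmt.Println(string(data)) // ready
}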
-*/ - -package filesystem - -import ( - "fmt" - "os" - "path/filepath" -) - -// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file -func IsUnixDomainSocket(filePath string) (bool, error) { - fi, err := os.Stat(filePath) - if err != nil { - return false, fmt.Errorf("stat file %s failed: %v", filePath, err) - } - if fi.Mode()&os.ModeSocket == 0 { - return false, nil - } - return true, nil -} - -// Chmod is the same as os.Chmod on Unix. -func Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -// MkdirAll is same as os.MkdirAll on Unix. -func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is same as filepath.IsAbs on Unix. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go deleted file mode 100644 index 95616d34f6c..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go +++ /dev/null @@ -1,263 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package filesystem - -import ( - "fmt" - "net" - "os" - "path/filepath" - "strings" - "time" - - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" - - "golang.org/x/sys/windows" -) - -const ( - // Amount of time to wait between attempting to use a Unix domain socket. - // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 - // the first attempt will most likely fail, hence the need to retry - socketDialRetryPeriod = 1 * time.Second - // Overall timeout value to dial a Unix domain socket, including retries - socketDialTimeout = 4 * time.Second -) - -// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file -// Note that due to the retry logic inside, it could take up to 4 seconds -// to determine whether or not the file path supplied is a Unix domain socket -func IsUnixDomainSocket(filePath string) (bool, error) { - // Note that querrying for the Reparse Points (https://docs.microsoft.com/en-us/windows/win32/fileio/reparse-points) - // for the file (using FSCTL_GET_REPARSE_POINT) and checking for reparse tag: reparseTagSocket - // does NOT work in 1809 if the socket file is created within a bind mounted directory by a container - // and the FSCTL is issued in the host by the kubelet. - - // If the file does not exist, it cannot be a Unix domain socket. - if info, err := os.Stat(filePath); os.IsNotExist(err) { - return false, fmt.Errorf("File %s not found. 
Err: %v", filePath, err) - } else if err == nil && info.Mode()&os.ModeSocket != 0 { // Use os.ModeSocket (introduced in Go 1.23 on Windows) - klog.V(6).InfoS("File identified as a Unix domain socket", "filePath", filePath) - return true, nil - } - klog.V(6).InfoS("Function IsUnixDomainSocket starts", "filePath", filePath) - // Due to the absence of golang support for os.ModeSocket in Windows (https://github.com/golang/go/issues/33357) - // we need to dial the file and check if we receive an error to determine if a file is Unix Domain Socket file. - // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 we cannot rely - // on the Unix Domain socket working on the very first try, hence the potential need to - // dial multiple times - var lastSocketErr error - err := wait.PollImmediate(socketDialRetryPeriod, socketDialTimeout, - func() (bool, error) { - klog.V(6).InfoS("Dialing the socket", "filePath", filePath) - var c net.Conn - c, lastSocketErr = net.Dial("unix", filePath) - if lastSocketErr == nil { - c.Close() - klog.V(6).InfoS("Socket dialed successfully", "filePath", filePath) - return true, nil - } - klog.V(6).InfoS("Failed the current attempt to dial the socket, so pausing before retry", - "filePath", filePath, "err", lastSocketErr, "socketDialRetryPeriod", - socketDialRetryPeriod) - return false, nil - }) - - // PollImmediate will return "timed out waiting for the condition" if the function it - // invokes never returns true - if err != nil { - klog.V(2).InfoS("Failed all attempts to dial the socket so marking it as a non-Unix Domain socket. Last socket error along with the error from PollImmediate follow", - "filePath", filePath, "lastSocketErr", lastSocketErr, "err", err) - return false, nil - } - return true, nil -} - -// On Windows os.Mkdir all doesn't set any permissions so call the Chown function below to set -// permissions once the directory is created. -func MkdirAll(path string, perm os.FileMode) error { - klog.V(6).InfoS("Function MkdirAll starts", "path", path, "perm", perm) - if _, err := os.Stat(path); err == nil { - // Path already exists: nothing to do. - return nil - } else if !os.IsNotExist(err) { - return fmt.Errorf("error checking path %s: %w", path, err) - } - - err := os.MkdirAll(path, perm) - if err != nil { - return fmt.Errorf("error creating directory %s: %w", path, err) - } - - err = Chmod(path, perm) - if err != nil { - return fmt.Errorf("error setting permissions for directory %s: %w", path, err) - } - - return nil -} - -const ( - // These aren't defined in the syscall package for Windows :( - USER_READ = 0x100 - USER_WRITE = 0x80 - USER_EXECUTE = 0x40 - GROUP_READ = 0x20 - GROUP_WRITE = 0x10 - GROUP_EXECUTE = 0x8 - OTHERS_READ = 0x4 - OTHERS_WRITE = 0x2 - OTHERS_EXECUTE = 0x1 - USER_ALL = USER_READ | USER_WRITE | USER_EXECUTE - GROUP_ALL = GROUP_READ | GROUP_WRITE | GROUP_EXECUTE - OTHERS_ALL = OTHERS_READ | OTHERS_WRITE | OTHERS_EXECUTE -) - -// On Windows os.Chmod only sets the read-only flag on files, so we need to use Windows APIs to set the desired access on files / directories. -// The OWNER mode will set file permissions for the file owner SID, the GROUP mode will set file permissions for the file group SID, -// and the OTHERS mode will set file permissions for BUILTIN\Users. -// Please note that Windows containers can be run as one of two user accounts; ContainerUser or ContainerAdministrator. 
-// Containers run as ContainerAdministrator will inherit permissions from BUILTIN\Administrators, -// while containers run as ContainerUser will inherit permissions from BUILTIN\Users. -// Windows containers do not have the ability to run as a custom user account that is known to the host so the OTHERS group mode -// is used to grant / deny permissions of files on the hosts to the ContainerUser account. -func Chmod(path string, filemode os.FileMode) error { - klog.V(6).InfoS("Function Chmod starts", "path", path, "filemode", filemode) - // Get security descriptor for the file - sd, err := windows.GetNamedSecurityInfo( - path, - windows.SE_FILE_OBJECT, - windows.DACL_SECURITY_INFORMATION|windows.PROTECTED_DACL_SECURITY_INFORMATION|windows.OWNER_SECURITY_INFORMATION|windows.GROUP_SECURITY_INFORMATION) - if err != nil { - return fmt.Errorf("Error getting security descriptor for file %s: %v", path, err) - } - - // Get owner SID from the security descriptor for assigning USER permissions - owner, _, err := sd.Owner() - if err != nil { - return fmt.Errorf("Error getting owner SID for file %s: %v", path, err) - } - ownerString := owner.String() - - // Get the group SID from the security descriptor for assigning GROUP permissions - group, _, err := sd.Group() - if err != nil { - return fmt.Errorf("Error getting group SID for file %s: %v", path, err) - } - groupString := group.String() - - mask := uint32(windows.ACCESS_MASK(filemode)) - - // Build a new Discretionary Access Control List (DACL) with the desired permissions using - //the Security Descriptor Definition Language (SDDL) format. - // https://learn.microsoft.com/windows/win32/secauthz/security-descriptor-definition-language - // the DACL is a list of Access Control Entries (ACEs) where each ACE represents the permissions (Allow or Deny) for a specific SID. - // Each ACE has the following format: - // (AceType;AceFlags;Rights;ObjectGuid;InheritObjectGuid;AccountSid) - // We can leave ObjectGuid and InheritObjectGuid empty for our purposes. 
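// For illustration of the SDDL string the builder below produces: with
// filemode 0755 the resulting DACL would be, e.g.,
//
//	D:(A;OICI;FA;;;<owner SID>)(A;OICI;FRFX;;;<group SID>)(A;OICI;FRFX;;;BU)
//
// i.e. full access ("FA") for the owner, read+execute ("FR"+"FX") for the
// group and for BUILTIN\Users ("BU"); the SID placeholders are resolved at
// runtime from the file's security descriptor.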
- - dacl := "D:" - - // build the owner ACE - dacl += "(A;OICI;" - if mask&USER_ALL == USER_ALL { - dacl += "FA" - } else { - if mask&USER_READ == USER_READ { - dacl += "FR" - } - if mask&USER_WRITE == USER_WRITE { - dacl += "FW" - } - if mask&USER_EXECUTE == USER_EXECUTE { - dacl += "FX" - } - } - dacl += ";;;" + ownerString + ")" - - // Build the group ACE - dacl += "(A;OICI;" - if mask&GROUP_ALL == GROUP_ALL { - dacl += "FA" - } else { - if mask&GROUP_READ == GROUP_READ { - dacl += "FR" - } - if mask&GROUP_WRITE == GROUP_WRITE { - dacl += "FW" - } - if mask&GROUP_EXECUTE == GROUP_EXECUTE { - dacl += "FX" - } - } - dacl += ";;;" + groupString + ")" - - // Build the others ACE - dacl += "(A;OICI;" - if mask&OTHERS_ALL == OTHERS_ALL { - dacl += "FA" - } else { - if mask&OTHERS_READ == OTHERS_READ { - dacl += "FR" - } - if mask&OTHERS_WRITE == OTHERS_WRITE { - dacl += "FW" - } - if mask&OTHERS_EXECUTE == OTHERS_EXECUTE { - dacl += "FX" - } - } - dacl += ";;;BU)" - - klog.V(6).InfoS("Setting new DACL for path", "path", path, "dacl", dacl) - - // create a new security descriptor from the DACL string - newSD, err := windows.SecurityDescriptorFromString(dacl) - if err != nil { - return fmt.Errorf("Error creating new security descriptor from DACL string: %v", err) - } - - // get the DACL in binary format from the newly created security descriptor - newDACL, _, err := newSD.DACL() - if err != nil { - return fmt.Errorf("Error getting DACL from new security descriptor: %v", err) - } - - // Write the new security descriptor to the file - return windows.SetNamedSecurityInfo( - path, - windows.SE_FILE_OBJECT, - windows.DACL_SECURITY_INFORMATION|windows.PROTECTED_DACL_SECURITY_INFORMATION, - nil, // owner SID - nil, // group SID - newDACL, - nil) // SACL -} - -// IsAbs returns whether the given path is absolute or not. -// On Windows, filepath.IsAbs will not return True for paths prefixed with a slash, even -// though they can be used as absolute paths (https://docs.microsoft.com/en-us/dotnet/standard/io/file-path-formats). -// -// WARN: It isn't safe to use this for API values which will propagate across systems (e.g. REST API values -// that get validated on Unix, persisted, then consumed by Windows, etc). -func IsAbs(path string) bool { - return filepath.IsAbs(path) || strings.HasPrefix(path, `\`) || strings.HasPrefix(path, `/`) -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/watcher.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/watcher.go deleted file mode 100644 index cbbc83985de..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/filesystem/watcher.go +++ /dev/null @@ -1,216 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package filesystem - -import ( - "context" - "fmt" - "time" - - "github.com/fsnotify/fsnotify" -) - -// FSWatcher is a callback-based filesystem watcher abstraction for fsnotify. -type FSWatcher interface { - // Initializes the watcher with the given watch handlers. - // Called before all other methods. 
- Init(FSEventHandler, FSErrorHandler) error - - // Starts listening for events and errors. - // When an event or error occurs, the corresponding handler is called. - Run() - - // Add a filesystem path to watch - AddWatch(path string) error -} - -// FSEventHandler is called when a fsnotify event occurs. -type FSEventHandler func(event fsnotify.Event) - -// FSErrorHandler is called when a fsnotify error occurs. -type FSErrorHandler func(err error) - -type fsnotifyWatcher struct { - watcher *fsnotify.Watcher - eventHandler FSEventHandler - errorHandler FSErrorHandler -} - -var _ FSWatcher = &fsnotifyWatcher{} - -// NewFsnotifyWatcher returns an implementation of FSWatcher that continuously listens for -// fsnotify events and calls the event handler as soon as an event is received. -func NewFsnotifyWatcher() FSWatcher { - return &fsnotifyWatcher{} -} - -func (w *fsnotifyWatcher) AddWatch(path string) error { - return w.watcher.Add(path) -} - -func (w *fsnotifyWatcher) Init(eventHandler FSEventHandler, errorHandler FSErrorHandler) error { - var err error - w.watcher, err = fsnotify.NewWatcher() - if err != nil { - return err - } - - w.eventHandler = eventHandler - w.errorHandler = errorHandler - return nil -} - -func (w *fsnotifyWatcher) Run() { - go func() { - defer w.watcher.Close() - for { - select { - case event := <-w.watcher.Events: - if w.eventHandler != nil { - w.eventHandler(event) - } - case err := <-w.watcher.Errors: - if w.errorHandler != nil { - w.errorHandler(err) - } - } - } - }() -} - -type watchAddRemover interface { - Add(path string) error - Remove(path string) error -} -type noopWatcher struct{} - -func (noopWatcher) Add(path string) error { return nil } -func (noopWatcher) Remove(path string) error { return nil } - -// WatchUntil watches the specified path for changes and blocks until ctx is canceled. -// eventHandler() must be non-nil, and pollInterval must be greater than 0. -// eventHandler() is invoked whenever a change event is observed or pollInterval elapses. -// errorHandler() is invoked (if non-nil) whenever an error occurs initializing or watching the specified path. -// -// If path is a directory, only the directory and immediate children are watched. -// -// If path does not exist or cannot be watched, an error is passed to errorHandler() and eventHandler() is called at pollInterval. -// -// Multiple observed events may collapse to a single invocation of eventHandler(). -// -// eventHandler() is invoked immediately after successful initialization of the filesystem watch, -// in case the path changed concurrent with calling WatchUntil(). 
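// For illustration: a minimal sketch of driving WatchUntil, assuming a
// hypothetical reloadConfig helper and config path. WatchUntil blocks until
// the context is canceled, so callers typically run it in a goroutine:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	go WatchUntil(ctx, time.Minute, "/etc/kubelet/config.yaml",
//		func() { reloadConfig() },                         // hypothetical
//		func(err error) { klog.Errorf("watch error: %v", err) })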
-func WatchUntil(ctx context.Context, pollInterval time.Duration, path string, eventHandler func(), errorHandler func(err error)) { - if pollInterval <= 0 { - panic(fmt.Errorf("pollInterval must be > 0")) - } - if eventHandler == nil { - panic(fmt.Errorf("eventHandler must be non-nil")) - } - if errorHandler == nil { - errorHandler = func(err error) {} - } - - // Initialize watcher, fall back to no-op - var ( - eventsCh chan fsnotify.Event - errorCh chan error - watcher watchAddRemover - ) - if w, err := fsnotify.NewWatcher(); err != nil { - errorHandler(fmt.Errorf("error creating file watcher, falling back to poll at interval %s: %w", pollInterval, err)) - watcher = noopWatcher{} - } else { - watcher = w - eventsCh = w.Events - errorCh = w.Errors - defer func() { - _ = w.Close() - }() - } - - // Initialize background poll - t := time.NewTicker(pollInterval) - defer t.Stop() - - attemptPeriodicRewatch := false - - // Start watching the path - if err := watcher.Add(path); err != nil { - errorHandler(err) - attemptPeriodicRewatch = true - } else { - // Invoke handle() at least once after successfully registering the listener, - // in case the file changed concurrent with calling WatchUntil. - eventHandler() - } - - for { - select { - case <-ctx.Done(): - return - - case <-t.C: - // Prioritize exiting if context is canceled - if ctx.Err() != nil { - return - } - - // Try to re-establish the watcher if we previously got a watch error - if attemptPeriodicRewatch { - _ = watcher.Remove(path) - if err := watcher.Add(path); err != nil { - errorHandler(err) - } else { - attemptPeriodicRewatch = false - } - } - - // Handle - eventHandler() - - case e := <-eventsCh: - // Prioritize exiting if context is canceled - if ctx.Err() != nil { - return - } - - // Try to re-establish the watcher for events which dropped the existing watch - if e.Name == path && (e.Has(fsnotify.Remove) || e.Has(fsnotify.Rename)) { - _ = watcher.Remove(path) - if err := watcher.Add(path); err != nil { - errorHandler(err) - attemptPeriodicRewatch = true - } - } - - // Handle - eventHandler() - - case err := <-errorCh: - // Prioritize exiting if context is canceled - if ctx.Err() != nil { - return - } - - // If the error occurs in response to calling watcher.Add, re-adding here could hot-loop. - // The periodic poll will attempt to re-establish the watch. - errorHandler(err) - attemptPeriodicRewatch = true - } - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/OWNERS b/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/OWNERS deleted file mode 100644 index 9437a585813..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - sig-network-reviewers - - sig-node-reviewers -approvers: - - sig-network-approvers - - sig-node-approvers diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/constants.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/constants.go deleted file mode 100644 index 1467f6c229d..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/constants.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kernel - -// IPLocalReservedPortsNamespacedKernelVersion is the kernel version in which net.ipv4.ip_local_reserved_ports was namespaced(netns). -// (ref: https://github.com/torvalds/linux/commit/122ff243f5f104194750ecbc76d5946dd1eec934) -const IPLocalReservedPortsNamespacedKernelVersion = "3.16" - -// IPVSConnReuseModeMinSupportedKernelVersion is the minium kernel version supporting net.ipv4.vs.conn_reuse_mode. -// (ref: https://github.com/torvalds/linux/commit/d752c364571743d696c2a54a449ce77550c35ac5) -const IPVSConnReuseModeMinSupportedKernelVersion = "4.1" - -// TCPKeepAliveTimeNamespacedKernelVersion is the kernel version in which net.ipv4.tcp_keepalive_time was namespaced(netns). -// (ref: https://github.com/torvalds/linux/commit/13b287e8d1cad951634389f85b8c9b816bd3bb1e) -const TCPKeepAliveTimeNamespacedKernelVersion = "4.5" - -// TCPKeepAliveIntervalNamespacedKernelVersion is the kernel version in which net.ipv4.tcp_keepalive_intvl was namespaced(netns). -// (ref: https://github.com/torvalds/linux/commit/b840d15d39128d08ed4486085e5507d2617b9ae1) -const TCPKeepAliveIntervalNamespacedKernelVersion = "4.5" - -// TCPKeepAliveProbesNamespacedKernelVersion is the kernel version in which net.ipv4.tcp_keepalive_probes was namespaced(netns). -// (ref: https://github.com/torvalds/linux/commit/9bd6861bd4326e3afd3f14a9ec8a723771fb20bb) -const TCPKeepAliveProbesNamespacedKernelVersion = "4.5" - -// TCPFinTimeoutNamespacedKernelVersion is the kernel version in which net.ipv4.tcp_fin_timeout was namespaced(netns). -// (ref: https://github.com/torvalds/linux/commit/1e579caa18b96f9eb18f4f5416658cd15f37c062) -const TCPFinTimeoutNamespacedKernelVersion = "4.6" - -// IPVSConnReuseModeFixedKernelVersion is the kernel version in which net.ipv4.vs.conn_reuse_mode was fixed. -// (ref: https://github.com/torvalds/linux/commit/35dfb013149f74c2be1ff9c78f14e6a3cd1539d1) -const IPVSConnReuseModeFixedKernelVersion = "5.9" - -const TmpfsNoswapSupportKernelVersion = "6.4" - -// NFTablesKubeProxyKernelVersion is the lowest kernel version kube-proxy supports using -// nftables mode with by default. This is not directly related to any specific kernel -// commit; see https://issues.k8s.io/122743#issuecomment-1893922424 -const NFTablesKubeProxyKernelVersion = "5.13" - -// TCPReceiveMemoryNamespacedKernelVersion is the kernel version in which net.ipv4.tcp_rmem was namespaced(netns). -// (ref: https://github.com/torvalds/linux/commit/356d1833b638bd465672aefeb71def3ab93fc17d) -const TCPReceiveMemoryNamespacedKernelVersion = "4.15" - -// TCPTransmitMemoryNamespacedKernelVersion is the kernel version in which net.ipv4.tcp_wmem was namespaced(netns). -// (ref: https://github.com/torvalds/linux/commit/356d1833b638bd465672aefeb71def3ab93fc17d) -const TCPTransmitMemoryNamespacedKernelVersion = "4.15" diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/version.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/version.go deleted file mode 100644 index 79f0bf9a5e9..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/kernel/version.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kernel - -import ( - "fmt" - "os" - "strings" - - "k8s.io/apimachinery/pkg/util/version" -) - -type readFileFunc func(string) ([]byte, error) - -// GetVersion returns currently running kernel version. -func GetVersion() (*version.Version, error) { - return getVersion(os.ReadFile) -} - -// getVersion reads os release file from the give readFile function. -func getVersion(readFile readFileFunc) (*version.Version, error) { - kernelVersionFile := "/proc/sys/kernel/osrelease" - fileContent, err := readFile(kernelVersionFile) - if err != nil { - return nil, fmt.Errorf("failed to read os-release file: %s", err.Error()) - } - - kernelVersion, err := version.ParseGeneric(strings.TrimSpace(string(fileContent))) - if err != nil { - return nil, fmt.Errorf("failed to parse kernel version: %s", err.Error()) - } - - return kernelVersion, nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/doc.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/doc.go deleted file mode 100644 index 9825a53f2a5..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package oom implements utility functions relating to out of memory management. -package oom diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom.go deleted file mode 100644 index a5b1ea0ffc6..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oom - -// This is a struct instead of an interface to allow injection of process ID listers and -// applying OOM score in tests. -// TODO: make this an interface, and inject a mock ioutil struct for testing. 
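// For illustration of the injection described above, a test could stub the
// function-typed fields of the struct below (a sketch, not test code from
// this package):
//
//	adj := NewOOMAdjuster()
//	adj.ApplyOOMScoreAdj = func(pid, score int) error { return nil } // skip /proc writes
//	_ = adj.ApplyOOMScoreAdjContainer("kubepods/burstable/test", -998, 3)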
-type OOMAdjuster struct { - pidLister func(cgroupName string) ([]int, error) - ApplyOOMScoreAdj func(pid int, oomScoreAdj int) error - ApplyOOMScoreAdjContainer func(cgroupName string, oomScoreAdj, maxTries int) error -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_fake.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_fake.go deleted file mode 100644 index 2712c5d0b8e..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_fake.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oom - -type FakeOOMAdjuster struct{} - -func NewFakeOOMAdjuster() *OOMAdjuster { - return &OOMAdjuster{ - pidLister: func(cgroupName string) ([]int, error) { return make([]int, 0), nil }, - ApplyOOMScoreAdj: fakeApplyOOMScoreAdj, - ApplyOOMScoreAdjContainer: fakeApplyOOMScoreAdjContainer, - } -} - -func fakeApplyOOMScoreAdj(pid int, oomScoreAdj int) error { - return nil -} - -func fakeApplyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error { - return nil -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go deleted file mode 100644 index 380967f9c00..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go +++ /dev/null @@ -1,130 +0,0 @@ -//go:build linux -// +build linux - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oom - -import ( - "fmt" - "os" - "path" - "path/filepath" - "strconv" - "time" - - cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util" - - "k8s.io/klog/v2" -) - -func NewOOMAdjuster() *OOMAdjuster { - oomAdjuster := &OOMAdjuster{ - pidLister: getPids, - ApplyOOMScoreAdj: applyOOMScoreAdj, - } - oomAdjuster.ApplyOOMScoreAdjContainer = oomAdjuster.applyOOMScoreAdjContainer - return oomAdjuster -} - -func getPids(cgroupName string) ([]int, error) { - return cmutil.GetPids(filepath.Join("/", cgroupName)) -} - -// Writes 'value' to /proc//oom_score_adj. PID = 0 means self -// Returns os.ErrNotExist if the `pid` does not exist. 
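// For illustration: stripped of the retry loop and the "self" handling, the
// function below boils down to a single proc write (score value hypothetical):
//
//	// equivalent to: echo -998 > /proc/self/oom_score_adj
//	err := os.WriteFile("/proc/self/oom_score_adj", []byte("-998"), 0700)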
-func applyOOMScoreAdj(pid int, oomScoreAdj int) error { - if pid < 0 { - return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid) - } - - var pidStr string - if pid == 0 { - pidStr = "self" - } else { - pidStr = strconv.Itoa(pid) - } - - maxTries := 2 - oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj") - value := strconv.Itoa(oomScoreAdj) - klog.V(4).Infof("attempting to set %q to %q", oomScoreAdjPath, value) - var err error - for i := 0; i < maxTries; i++ { - err = os.WriteFile(oomScoreAdjPath, []byte(value), 0700) - if err != nil { - if os.IsNotExist(err) { - klog.V(2).Infof("%q does not exist", oomScoreAdjPath) - return os.ErrNotExist - } - - klog.V(3).Info(err) - time.Sleep(100 * time.Millisecond) - continue - } - return nil - } - if err != nil { - klog.V(2).Infof("failed to set %q to %q: %v", oomScoreAdjPath, value, err) - } - return err -} - -// Writes 'value' to /proc//oom_score_adj for all processes in cgroup cgroupName. -// Keeps trying to write until the process list of the cgroup stabilizes, or until maxTries tries. -func (oomAdjuster *OOMAdjuster) applyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error { - adjustedProcessSet := make(map[int]bool) - for i := 0; i < maxTries; i++ { - continueAdjusting := false - pidList, err := oomAdjuster.pidLister(cgroupName) - if err != nil { - if os.IsNotExist(err) { - // Nothing to do since the container doesn't exist anymore. - return os.ErrNotExist - } - continueAdjusting = true - klog.V(10).Infof("Error getting process list for cgroup %s: %+v", cgroupName, err) - } else if len(pidList) == 0 { - klog.V(10).Infof("Pid list is empty") - continueAdjusting = true - } else { - for _, pid := range pidList { - if !adjustedProcessSet[pid] { - klog.V(10).Infof("pid %d needs to be set", pid) - if err = oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err == nil { - adjustedProcessSet[pid] = true - } else if err == os.ErrNotExist { - continue - } else { - klog.V(10).Infof("cannot adjust oom score for pid %d - %v", pid, err) - continueAdjusting = true - } - // Processes can come and go while we try to apply oom score adjust value. So ignore errors here. - } - } - } - if !continueAdjusting { - return nil - } - // There's a slight race. A process might have forked just before we write its OOM score adjust. - // The fork might copy the parent process's old OOM score, then this function might execute and - // update the parent's OOM score, but the forked process id might not be reflected in cgroup.procs - // for a short amount of time. So this function might return without changing the forked process's - // OOM score. Very unlikely race, so ignoring this for now. - } - return fmt.Errorf("exceeded maxTries, some processes might not have desired OOM score") -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_unsupported.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_unsupported.go deleted file mode 100644 index 1e4b926e24e..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/oom/oom_unsupported.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !linux -// +build !linux - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oom - -import ( - "errors" -) - -var unsupportedErr = errors.New("setting OOM scores is unsupported in this build") - -func NewOOMAdjuster() *OOMAdjuster { - return &OOMAdjuster{ - ApplyOOMScoreAdj: unsupportedApplyOOMScoreAdj, - ApplyOOMScoreAdjContainer: unsupportedApplyOOMScoreAdjContainer, - } -} - -func unsupportedApplyOOMScoreAdj(pid int, oomScoreAdj int) error { - return unsupportedErr -} - -func unsupportedApplyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error { - return unsupportedErr -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go deleted file mode 100644 index 8caa21007fa..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" - clientset "k8s.io/client-go/kubernetes" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" -) - -// PatchPodStatus patches pod status. It returns true and avoids an update if the patch contains no changes. 
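// For illustration: the strategic-merge patch prepared below carries the UID
// as a precondition plus only the status fields that changed, e.g.
//
//	{"metadata":{"uid":"<pod-uid>"},"status":{"phase":"Running"}}
//
// When nothing changed it degenerates to {"metadata":{"uid":"<pod-uid>"}},
// which is exactly the sentinel the "unchanged" comparison below tests for.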
-func PatchPodStatus(ctx context.Context, c clientset.Interface, namespace, name string, uid types.UID, oldPodStatus, newPodStatus v1.PodStatus) (*v1.Pod, []byte, bool, error) { - patchBytes, unchanged, err := preparePatchBytesForPodStatus(namespace, name, uid, oldPodStatus, newPodStatus) - if err != nil { - return nil, nil, false, err - } - if unchanged { - return nil, patchBytes, true, nil - } - - updatedPod, err := c.CoreV1().Pods(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") - if err != nil { - return nil, nil, false, fmt.Errorf("failed to patch status %q for pod %q/%q: %v", patchBytes, namespace, name, err) - } - return updatedPod, patchBytes, false, nil -} - -func preparePatchBytesForPodStatus(namespace, name string, uid types.UID, oldPodStatus, newPodStatus v1.PodStatus) ([]byte, bool, error) { - oldData, err := json.Marshal(v1.Pod{ - Status: oldPodStatus, - }) - if err != nil { - return nil, false, fmt.Errorf("failed to Marshal oldData for pod %q/%q: %v", namespace, name, err) - } - - newData, err := json.Marshal(v1.Pod{ - ObjectMeta: metav1.ObjectMeta{UID: uid}, // only put the uid in the new object to ensure it appears in the patch as a precondition - Status: newPodStatus, - }) - if err != nil { - return nil, false, fmt.Errorf("failed to Marshal newData for pod %q/%q: %v", namespace, name, err) - } - - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{}) - if err != nil { - return nil, false, fmt.Errorf("failed to CreateTwoWayMergePatch for pod %q/%q: %v", namespace, name, err) - } - return patchBytes, bytes.Equal(patchBytes, []byte(fmt.Sprintf(`{"metadata":{"uid":%q}}`, uid))), nil -} - -// ReplaceOrAppendPodCondition replaces the first pod condition with equal type or appends if there is none -func ReplaceOrAppendPodCondition(conditions []v1.PodCondition, condition *v1.PodCondition) []v1.PodCondition { - if i, _ := podutil.GetPodConditionFromList(conditions, condition.Type); i >= 0 { - conditions[i] = *condition - } else { - conditions = append(conditions, *condition) - } - return conditions -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go b/e2e/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go deleted file mode 100644 index 872fbdcad65..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package slice provides utility methods for common operations on slices. -package slice - -import ( - "sort" -) - -// CopyStrings copies the contents of the specified string slice -// into a new slice. -func CopyStrings(s []string) []string { - if s == nil { - return nil - } - c := make([]string, len(s)) - copy(c, s) - return c -} - -// SortStrings sorts the specified string slice in place. It returns the same -// slice that was provided in order to facilitate method chaining. 
-func SortStrings(s []string) []string { - sort.Strings(s) - return s -} - -// ContainsString checks if a given slice of strings contains the provided string. -// If a modifier func is provided, it is called with the slice item before the comparation. -func ContainsString(slice []string, s string, modifier func(s string) string) bool { - for _, item := range slice { - if item == s { - return true - } - if modifier != nil && modifier(item) == s { - return true - } - } - return false -} - -// RemoveString returns a newly created []string that contains all items from slice that -// are not equal to s and modifier(s) in case modifier func is provided. -func RemoveString(slice []string, s string, modifier func(s string) string) []string { - newSlice := make([]string, 0) - for _, item := range slice { - if item == s { - continue - } - if modifier != nil && modifier(item) == s { - continue - } - newSlice = append(newSlice, item) - } - if len(newSlice) == 0 { - // Sanitize for unit tests so we don't need to distinguish empty array - // and nil. - newSlice = nil - } - return newSlice -} diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index c4e5fee2851..a6426cbd9d0 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -17,15 +17,14 @@ limitations under the License. package volume import ( + "context" "errors" "fmt" - "net" "strings" "sync" "k8s.io/klog/v2" "k8s.io/mount-utils" - "k8s.io/utils/exec" authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" @@ -174,8 +173,6 @@ type VolumePlugin interface { ConstructVolumeSpec(volumeName, volumePath string) (ReconstructedVolume, error) // SupportsMountOption returns true if volume plugins supports Mount options - // Specifying mount options in a volume plugin that doesn't support - // user specified mount options will result in error creating persistent volumes SupportsMountOption() bool // SupportsSELinuxContextMount returns true if volume plugins supports @@ -234,7 +231,7 @@ type AttachableVolumePlugin interface { NewDetacher() (Detacher, error) // CanAttach tests if provided volume spec is attachable CanAttach(spec *Spec) (bool, error) - VerifyExhaustedResource(spec *Spec, nodeName types.NodeName) + VerifyExhaustedResource(spec *Spec) bool } // DeviceMountableVolumePlugin is an extended interface of VolumePlugin and is used @@ -316,6 +313,9 @@ type KubeletVolumeHost interface { // Returns trust anchors from the ClusterTrustBundles selected by signer // name and label selector. GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error) + + // Returns the credential bundle for the specified podCertificate projected volume source. + GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) } // CSIDriverVolumeHost is a volume host that has access to CSIDriverLister @@ -388,14 +388,11 @@ type VolumeHost interface { NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error) // Get mounter interface. - GetMounter(pluginName string) mount.Interface + GetMounter() mount.Interface // Returns the hostname of the host kubelet is running on GetHostName() string - // Returns host IP or nil in the case of error. - GetHostIP() (net.IP, error) - // Returns node allocatable. 
GetNodeAllocatable() (v1.ResourceList, error) @@ -409,9 +406,6 @@ type VolumeHost interface { DeleteServiceAccountTokenFunc() func(podUID types.UID) - // Returns an interface that should be used to execute any utilities in volume plugins - GetExec(pluginName string) exec.Interface - // Returns the labels on the node GetNodeLabels() (map[string]string, error) diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go index 943357b6538..4249a6be4f8 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go @@ -25,28 +25,6 @@ import ( // shared between volume package and scheduler const ( - // EBSVolumeLimitKey resource name that will store volume limits for EBS - EBSVolumeLimitKey = "attachable-volumes-aws-ebs" - // EBSNitroLimitRegex finds nitro instance types with different limit than EBS defaults - EBSNitroLimitRegex = "^[cmr]5.*|t3|z1d" - // DefaultMaxEBSVolumes is the limit for volumes attached to an instance. - // Amazon recommends no more than 40; the system root volume uses at least one. - // See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits - DefaultMaxEBSVolumes = 39 - // DefaultMaxEBSNitroVolumeLimit is default EBS volume limit on m5 and c5 instances - DefaultMaxEBSNitroVolumeLimit = 25 - // AzureVolumeLimitKey stores resource name that will store volume limits for Azure - AzureVolumeLimitKey = "attachable-volumes-azure-disk" - // GCEVolumeLimitKey stores resource name that will store volume limits for GCE node - GCEVolumeLimitKey = "attachable-volumes-gce-pd" - - // CinderVolumeLimitKey contains Volume limit key for Cinder - CinderVolumeLimitKey = "attachable-volumes-cinder" - // DefaultMaxCinderVolumes defines the maximum number of PD Volumes for Cinder - // For Openstack we are keeping this to a high enough value so as depending on backend - // cluster admins can configure it. 
- DefaultMaxCinderVolumes = 256 - // CSIAttachLimitPrefix defines prefix used for CSI volumes CSIAttachLimitPrefix = "attachable-volumes-csi-" diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go index 599f220976e..189db6ed843 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/volume" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/mount-utils" + "k8s.io/utils/exec" ) var ( @@ -456,7 +457,7 @@ func mergeStorageAllocatedResources(pvc *v1.PersistentVolumeClaim, size resource } // GenericResizeFS : call generic filesystem resizer for plugins that don't have any special filesystem resize requirements -func GenericResizeFS(host volume.VolumeHost, pluginName, devicePath, deviceMountPath string) (bool, error) { - resizer := mount.NewResizeFs(host.GetExec(pluginName)) +func GenericResizeFS(host volume.VolumeHost, devicePath, deviceMountPath string) (bool, error) { + resizer := mount.NewResizeFs(exec.New()) return resizer.Resize(devicePath, deviceMountPath) } diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go index 316962f8f5d..3334213236d 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go @@ -25,6 +25,10 @@ import ( "k8s.io/mount-utils" ) +var ( + NodeExpansionNotRequired = "volume.kubernetes.io/node-expansion-not-required" +) + // UniquePodName defines the type to key pods off of type UniquePodName types.UID diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 0173902eb87..4c77d5a9b0a 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -333,14 +333,6 @@ func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) { return pluginName, components[2], nil } -// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter -// and Exec taken from given VolumeHost. -func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount { - mounter := host.GetMounter(pluginName) - exec := host.GetExec(pluginName) - return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec} -} - // GetVolumeMode retrieves VolumeMode from pv. // If the volume doesn't have PersistentVolume, it's an inline volume, // should return volumeMode as filesystem to keep existing behavior. 
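// For illustration of the call-site impact of the changes above: callers no
// longer thread a plugin name through exec/mounter lookups. A sketch, with
// hypothetical device and mount paths:
//
//	resized, err := util.GenericResizeFS(host, "/dev/nvme1n1", "/var/lib/kubelet/plugins/foo/mount")
//
// and the removed NewSafeFormatAndMountFromHost helper can be rebuilt inline
// as, e.g.:
//
//	fm := &mount.SafeFormatAndMount{Interface: host.GetMounter(), Exec: exec.New()}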
diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go index fa0b24a18d8..1f468410bf2 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go @@ -103,12 +103,12 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName } if bindMount { - return mapBindMountDevice(v, devicePath, mapPath, linkName) + return mapBindMountDevice(devicePath, mapPath, linkName) } - return mapSymlinkDevice(v, devicePath, mapPath, linkName) + return mapSymlinkDevice(devicePath, mapPath, linkName) } -func mapBindMountDevice(v VolumePathHandler, devicePath string, mapPath string, linkName string) error { +func mapBindMountDevice(devicePath string, mapPath string, linkName string) error { // Check bind mount exists linkPath := filepath.Join(mapPath, string(linkName)) @@ -146,7 +146,7 @@ func mapBindMountDevice(v VolumePathHandler, devicePath string, mapPath string, return nil } -func mapSymlinkDevice(v VolumePathHandler, devicePath string, mapPath string, linkName string) error { +func mapSymlinkDevice(devicePath string, mapPath string, linkName string) error { // Remove old symbolic link(or file) then create new one. // This should be done because current symbolic link is // stale across node reboot. diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume.go index f5f933e0c6a..36814591ccb 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume.go @@ -134,6 +134,15 @@ type MounterArgs struct { DesiredSize *resource.Quantity SELinuxLabel string Recorder record.EventRecorder + + // Optional interface that will be used to change the ownership of the volume, if specified. 
+ // mainly used by unit tests + VolumeOwnershipApplicator VolumeOwnershipChanger +} + +type VolumeOwnershipChanger interface { + AddProgressNotifier(pod *v1.Pod, recorder record.EventRecorder) VolumeOwnershipChanger + ChangePermissions() error } type VolumeOwnership struct { diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go index 9d023abfa05..0df840cdea9 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go @@ -51,7 +51,7 @@ var ( ) // NewVolumeOwnership returns an interface that can be used to recursively change volume permissions and ownership -func NewVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) *VolumeOwnership { +func NewVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) VolumeOwnershipChanger { vo := &VolumeOwnership{ mounter: mounter, dir: dir, @@ -63,7 +63,7 @@ func NewVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChan return vo } -func (vo *VolumeOwnership) AddProgressNotifier(pod *v1.Pod, recorder record.EventRecorder) *VolumeOwnership { +func (vo *VolumeOwnership) AddProgressNotifier(pod *v1.Pod, recorder record.EventRecorder) VolumeOwnershipChanger { vo.pod = pod vo.recorder = recorder return vo diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go b/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go index cfedeaa3221..ef46dac618d 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go +++ b/e2e/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go @@ -26,11 +26,11 @@ import ( ) // NewVolumeOwnership returns an interface that can be used to recursively change volume permissions and ownership -func NewVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) *VolumeOwnership { - return nil +func NewVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) VolumeOwnershipChanger { + return &VolumeOwnership{} } -func (vo *VolumeOwnership) AddProgressNotifier(pod *v1.Pod, recorder record.EventRecorder) *VolumeOwnership { +func (vo *VolumeOwnership) AddProgressNotifier(pod *v1.Pod, recorder record.EventRecorder) VolumeOwnershipChanger { return vo } diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/debug/resource_usage_gatherer.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/debug/resource_usage_gatherer.go index 7e6875b49bd..aed88d5188f 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/debug/resource_usage_gatherer.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/debug/resource_usage_gatherer.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" + "k8s.io/utils/ptr" ) // ResourceConstraint is a struct to hold constraints. 
@@ -259,10 +260,10 @@ func getOneTimeResourceUsageOnNode( return &ContainerResourceUsage{ Name: name, Timestamp: newStats.StartTime.Time, - CPUUsageInCores: float64(removeUint64Ptr(newStats.CPU.UsageNanoCores)) / 1000000000, - MemoryUsageInBytes: removeUint64Ptr(newStats.Memory.UsageBytes), - MemoryWorkingSetInBytes: removeUint64Ptr(newStats.Memory.WorkingSetBytes), - MemoryRSSInBytes: removeUint64Ptr(newStats.Memory.RSSBytes), + CPUUsageInCores: float64(ptr.Deref(newStats.CPU.UsageNanoCores, 0)) / 1000000000, + MemoryUsageInBytes: ptr.Deref(newStats.Memory.UsageBytes, 0), + MemoryWorkingSetInBytes: ptr.Deref(newStats.Memory.WorkingSetBytes, 0), + MemoryRSSInBytes: ptr.Deref(newStats.Memory.RSSBytes, 0), CPUInterval: 0, } } @@ -313,13 +314,6 @@ func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alp return &summary, nil } -func removeUint64Ptr(ptr *uint64) uint64 { - if ptr == nil { - return 0 - } - return *ptr -} - func (w *resourceGatherWorker) gather(ctx context.Context, initialSleep time.Duration) { defer utilruntime.HandleCrash() defer w.wg.Done() diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go index ff08e25b4d3..74df75a6346 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go @@ -544,6 +544,11 @@ func (f *Framework) ClientConfig() *rest.Config { return ret } +// SetClientConfig allows setting or updating the REST config in the Framework instance. +func (f *Framework) SetClientConfig(config *rest.Config) { + f.clientConfig = rest.CopyConfig(config) +} + // KubeUser is a struct for managing kubernetes user info. type KubeUser struct { Name string `yaml:"name"` diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/get.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/get.go index 1e1a83856d6..f941a7bb9c3 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/get.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/get.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "io" "time" "github.com/onsi/gomega" @@ -103,6 +104,7 @@ func ShouldRetry(err error) (retry bool, retryAfter time.Duration) { if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) || apierrors.IsServiceUnavailable(err) || + errors.Is(err, io.EOF) || errors.As(err, &transientError{}) { return true, 0 } diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go index ccc045ba791..06d6974ad96 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go @@ -343,7 +343,7 @@ func recordTextBug(location types.CodeLocation, message string) { RecordBug(Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message}) } -// WithEnvironment specifies that a certain test or group of tests only works +// WithFeature specifies that a certain test or group of tests only works // with a feature available. The return value must be passed as additional // argument to [framework.It], [framework.Describe], [framework.Context]. 
// @@ -444,7 +444,7 @@ func withEnvironment(name Environment) interface{} { return newLabel("Environment", string(name)) } -// WithConformace specifies that a certain test or group of tests must pass in +// WithConformance specifies that a certain test or group of tests must pass in // all conformant Kubernetes clusters. The return value must be passed as // additional argument to [framework.It], [framework.Describe], // [framework.Context]. @@ -515,9 +515,10 @@ func withSerial() interface{} { return newLabel("Serial") } -// WithSlow specifies that a certain test or group of tests must not run in -// parallel with other tests. The return value must be passed as additional -// argument to [framework.It], [framework.Describe], [framework.Context]. +// WithSlow specifies that a certain test, or each test within a group of +// tests, is slow (is expected to take longer than 5 minutes to run in CI). +// The return value must be passed as additional argument to [framework.It], +// [framework.Describe], [framework.Context]. func WithSlow() interface{} { return withSlow() } diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go index e33acb7d1b3..8c8faaa1f22 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go @@ -37,8 +37,13 @@ import ( // KubectlBuilder is used to build, customize and execute a kubectl Command. // Add more functions to customize the builder as needed. type KubectlBuilder struct { - cmd *exec.Cmd - timeout <-chan time.Time + cmd *exec.Cmd + // appendEnv contains only AppendEnv(...) values and NOT os.Environ() + // logging os.Environ is ~redundant and noisy + // logging the env we modified is important to understand what we're running + // versus the defaults + appendedEnv []string + timeout <-chan time.Time } // NewKubectlCommand returns a KubectlBuilder for running kubectl. @@ -55,6 +60,7 @@ func (b *KubectlBuilder) AppendEnv(env []string) *KubectlBuilder { b.cmd.Env = os.Environ() } b.cmd.Env = append(b.cmd.Env, env...) + b.appendedEnv = append(b.appendedEnv, env...) 
return b } @@ -118,7 +124,11 @@ func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) { cmd := b.cmd cmd.Stdout, cmd.Stderr = &stdout, &stderr - framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately + if len(b.appendedEnv) > 0 { + framework.Logf("Running '%s %s %s'", strings.Join(b.appendedEnv, " "), cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately + } else { + framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately + } if err := cmd.Start(); err != nil { return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err) } diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go index 360862d3438..04cf7f8a984 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go @@ -26,6 +26,7 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" ) @@ -44,7 +45,7 @@ func DeletePodOrFail(ctx context.Context, c clientset.Interface, ns, name string return } - expectNoError(err, "failed to delete pod %s in namespace %s", name, ns) + framework.ExpectNoErrorWithOffset(1, err, "failed to delete pod %s in namespace %s", name, ns) } // DeletePodWithWait deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod @@ -56,6 +57,21 @@ func DeletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) return DeletePodWithWaitByName(ctx, c, pod.GetName(), pod.GetNamespace()) } +// DeletePodsWithWait deletes the passed-in pods, waits for them to be terminated, and reports any deletion errors that occur. +func DeletePodsWithWait(ctx context.Context, c clientset.Interface, pods []*v1.Pod) { + var delErrs []error + for _, testPod := range pods { + delErr := c.CoreV1().Pods(testPod.Namespace).Delete(ctx, testPod.Name, metav1.DeleteOptions{}) + if delErr != nil && !apierrors.IsNotFound(delErr) { + delErrs = append(delErrs, delErr) + } + } + framework.ExpectNoError(errors.NewAggregate(delErrs), "while deleting pods") + for _, testPod := range pods { + framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c, testPod.Name, testPod.Namespace, PodDeleteTimeout)) + } +} + // DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod // not existing. func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName, podNamespace string) error { diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go index 3494ef5a8c5..664eae425ca 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go @@ -130,6 +130,19 @@ func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod { } +// TryCreate attempts to create a new pod according to the provided specification. +// This function is designed to return an error to the caller if pod creation fails. 
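// For illustration: unlike Create, which asserts success in-line, TryCreate
// below lets the caller branch on the error; a sketch (podClient and pod are
// assumed from the surrounding test):
//
//	if p, err := podClient.TryCreate(ctx, pod); err != nil {
//		framework.Logf("tolerating pod create failure: %v", err)
//	} else {
//		framework.Logf("created pod %s/%s", p.Namespace, p.Name)
//	}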
+func (c *PodClient) TryCreate(ctx context.Context, pod *v1.Pod) (*v1.Pod, error) { + ginkgo.GinkgoHelper() + c.mungeSpec(pod) + c.setOwnerAnnotation(pod) + p, err := c.PodInterface.Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("error creating pod %s/%s: %w", pod.Namespace, pod.Name, err) + } + return p, nil +} + // CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready. func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod { ginkgo.GinkgoHelper() diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resize.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resize.go deleted file mode 100644 index af8d43ac34a..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resize.go +++ /dev/null @@ -1,537 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - helpers "k8s.io/component-helpers/resource" - "k8s.io/kubectl/pkg/util/podutils" - kubecm "k8s.io/kubernetes/pkg/kubelet/cm" - kubeqos "k8s.io/kubernetes/pkg/kubelet/qos" - "k8s.io/kubernetes/test/e2e/framework" - imageutils "k8s.io/kubernetes/test/utils/image" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -const ( - CgroupCPUPeriod string = "/sys/fs/cgroup/cpu/cpu.cfs_period_us" - CgroupCPUShares string = "/sys/fs/cgroup/cpu/cpu.shares" - CgroupCPUQuota string = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" - CgroupMemLimit string = "/sys/fs/cgroup/memory/memory.limit_in_bytes" - Cgroupv2MemLimit string = "/sys/fs/cgroup/memory.max" - Cgroupv2MemRequest string = "/sys/fs/cgroup/memory.min" - Cgroupv2CPULimit string = "/sys/fs/cgroup/cpu.max" - Cgroupv2CPURequest string = "/sys/fs/cgroup/cpu.weight" - CPUPeriod string = "100000" - MinContainerRuntimeVersion string = "1.6.9" -) - -var ( - podOnCgroupv2Node *bool -) - -type ContainerResources struct { - CPUReq string - CPULim string - MemReq string - MemLim string - EphStorReq string - EphStorLim string - ExtendedResourceReq string - ExtendedResourceLim string -} - -func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements { - if cr == nil { - return nil - } - - var lim, req v1.ResourceList - if cr.CPULim != "" || cr.MemLim != "" || cr.EphStorLim != "" { - lim = make(v1.ResourceList) - } - if cr.CPUReq != "" || cr.MemReq != "" || cr.EphStorReq != "" { - req = make(v1.ResourceList) - } - if cr.CPULim != "" { - lim[v1.ResourceCPU] = resource.MustParse(cr.CPULim) - } - if cr.MemLim != "" { - lim[v1.ResourceMemory] = resource.MustParse(cr.MemLim) - } - if cr.EphStorLim != "" { - lim[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorLim) - } - if cr.CPUReq != "" { - req[v1.ResourceCPU] = resource.MustParse(cr.CPUReq) 
- } - if cr.MemReq != "" { - req[v1.ResourceMemory] = resource.MustParse(cr.MemReq) - } - if cr.EphStorReq != "" { - req[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorReq) - } - return &v1.ResourceRequirements{Limits: lim, Requests: req} -} - -type ResizableContainerInfo struct { - Name string - Resources *ContainerResources - CPUPolicy *v1.ResourceResizeRestartPolicy - MemPolicy *v1.ResourceResizeRestartPolicy - RestartCount int32 - RestartPolicy v1.ContainerRestartPolicy - InitCtr bool -} - -type containerPatch struct { - Name string `json:"name"` - Resources struct { - Requests struct { - CPU string `json:"cpu,omitempty"` - Memory string `json:"memory,omitempty"` - EphStor string `json:"ephemeral-storage,omitempty"` - } `json:"requests"` - Limits struct { - CPU string `json:"cpu,omitempty"` - Memory string `json:"memory,omitempty"` - EphStor string `json:"ephemeral-storage,omitempty"` - } `json:"limits"` - } `json:"resources"` -} - -type patchSpec struct { - Spec struct { - Containers []containerPatch `json:"containers"` - } `json:"spec"` -} - -func getTestResourceInfo(tcInfo ResizableContainerInfo) (res v1.ResourceRequirements, resizePol []v1.ContainerResizePolicy) { - if tcInfo.Resources != nil { - res = *tcInfo.Resources.ResourceRequirements() - } - if tcInfo.CPUPolicy != nil { - cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy} - resizePol = append(resizePol, cpuPol) - } - if tcInfo.MemPolicy != nil { - memPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: *tcInfo.MemPolicy} - resizePol = append(resizePol, memPol) - } - return res, resizePol -} - -func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container { - cmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d" - res, resizePol := getTestResourceInfo(tcInfo) - - tc := v1.Container{ - Name: tcInfo.Name, - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{"/bin/sh"}, - Args: []string{"-c", cmd}, - Resources: res, - ResizePolicy: resizePol, - } - if tcInfo.RestartPolicy != "" { - tc.RestartPolicy = &tcInfo.RestartPolicy - } - - return tc -} - -func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod { - testInitContainers, testContainers := separateContainers(tcInfo) - - minGracePeriodSeconds := int64(0) - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - Labels: map[string]string{ - "time": timeStamp, - }, - }, - Spec: v1.PodSpec{ - OS: &v1.PodOS{Name: v1.Linux}, - InitContainers: testInitContainers, - Containers: testContainers, - RestartPolicy: v1.RestartPolicyOnFailure, - TerminationGracePeriodSeconds: &minGracePeriodSeconds, - }, - } - return pod -} - -// separateContainers splits the input into initContainers and normal containers. -func separateContainers(tcInfo []ResizableContainerInfo) ([]v1.Container, []v1.Container) { - var initContainers, containers []v1.Container - - for _, ci := range tcInfo { - tc := makeResizableContainer(ci) - if ci.InitCtr { - initContainers = append(initContainers, tc) - } else { - containers = append(containers, tc) - } - } - - return initContainers, containers -} - -// separateContainerStatuses splits the input into initContainerStatuses and containerStatuses. 
-func separateContainerStatuses(tcInfo []ResizableContainerInfo) ([]v1.ContainerStatus, []v1.ContainerStatus) { - var containerStatuses, initContainerStatuses []v1.ContainerStatus - - for _, ci := range tcInfo { - ctrStatus := v1.ContainerStatus{ - Name: ci.Name, - RestartCount: ci.RestartCount, - } - if ci.InitCtr { - initContainerStatuses = append(initContainerStatuses, ctrStatus) - } else { - containerStatuses = append(containerStatuses, ctrStatus) - } - } - - return initContainerStatuses, containerStatuses -} - -func VerifyPodResizePolicy(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) { - ginkgo.GinkgoHelper() - - gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...) - var wantCtrs []v1.Container - for _, ci := range wantInfo { - wantCtrs = append(wantCtrs, makeResizableContainer(ci)) - } - gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") - for _, wantCtr := range wantCtrs { - for _, gotCtr := range gotCtrs { - if wantCtr.Name != gotCtr.Name { - continue - } - gomega.Expect(v1.Container{Name: gotCtr.Name, ResizePolicy: gotCtr.ResizePolicy}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, ResizePolicy: wantCtr.ResizePolicy})) - } - } -} - -func VerifyPodResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) { - ginkgo.GinkgoHelper() - - gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...) - var wantCtrs []v1.Container - for _, ci := range wantInfo { - wantCtrs = append(wantCtrs, makeResizableContainer(ci)) - } - gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") - for _, wantCtr := range wantCtrs { - for _, gotCtr := range gotCtrs { - if wantCtr.Name != gotCtr.Name { - continue - } - gomega.Expect(v1.Container{Name: gotCtr.Name, Resources: gotCtr.Resources}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, Resources: wantCtr.Resources})) - } - } -} - -func VerifyPodStatusResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) error { - ginkgo.GinkgoHelper() - - wantInitCtrs, wantCtrs := separateContainers(wantInfo) - var errs []error - if err := verifyPodContainersStatusResources(gotPod.Status.InitContainerStatuses, wantInitCtrs); err != nil { - errs = append(errs, err) - } - if err := verifyPodContainersStatusResources(gotPod.Status.ContainerStatuses, wantCtrs); err != nil { - errs = append(errs, err) - } - - return utilerrors.NewAggregate(errs) -} - -func verifyPodContainersStatusResources(gotCtrStatuses []v1.ContainerStatus, wantCtrs []v1.Container) error { - ginkgo.GinkgoHelper() - - var errs []error - if len(gotCtrStatuses) != len(wantCtrs) { - return fmt.Errorf("expectation length mismatch: got %d statuses, want %d", - len(gotCtrStatuses), len(wantCtrs)) - } - for i, wantCtr := range wantCtrs { - gotCtrStatus := gotCtrStatuses[i] - if gotCtrStatus.Name != wantCtr.Name { - errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, wantCtr.Name)) - continue - } - if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(wantCtr.Resources)); err != nil { - errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", wantCtr.Name, err)) - } - } - - return utilerrors.NewAggregate(errs) -} - -func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []ResizableContainerInfo) error { - ginkgo.GinkgoHelper() - if podOnCgroupv2Node == nil { - 
value := IsPodOnCgroupv2Node(f, pod) - podOnCgroupv2Node = &value - } - cgroupMemLimit := Cgroupv2MemLimit - cgroupCPULimit := Cgroupv2CPULimit - cgroupCPURequest := Cgroupv2CPURequest - if !*podOnCgroupv2Node { - cgroupMemLimit = CgroupMemLimit - cgroupCPULimit = CgroupCPUQuota - cgroupCPURequest = CgroupCPUShares - } - - var errs []error - for _, ci := range tcInfo { - if ci.Resources == nil { - continue - } - tc := makeResizableContainer(ci) - if tc.Resources.Limits != nil || tc.Resources.Requests != nil { - var expectedCPUShares int64 - var expectedMemLimitString string - expectedMemLimitInBytes := tc.Resources.Limits.Memory().Value() - cpuRequest := tc.Resources.Requests.Cpu() - cpuLimit := tc.Resources.Limits.Cpu() - if cpuRequest.IsZero() && !cpuLimit.IsZero() { - expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuLimit.MilliValue())) - } else { - expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuRequest.MilliValue())) - } - - expectedCPULimits := GetCPULimitCgroupExpectations(cpuLimit) - expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10) - if *podOnCgroupv2Node { - if expectedMemLimitString == "0" { - expectedMemLimitString = "max" - } - // convert cgroup v1 cpu.shares value to cgroup v2 cpu.weight value - // https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2 - expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142) - } - - if expectedMemLimitString != "0" { - errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupMemLimit, expectedMemLimitString)) - } - errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimits...)) - errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10))) - // TODO(vinaykul,InPlacePodVerticalScaling): Verify oom_score_adj when runc adds support for updating it - // See https://github.com/opencontainers/runc/pull/4669 - } - } - return utilerrors.NewAggregate(errs) -} - -func verifyPodRestarts(f *framework.Framework, pod *v1.Pod, wantInfo []ResizableContainerInfo) error { - ginkgo.GinkgoHelper() - - initCtrStatuses, ctrStatuses := separateContainerStatuses(wantInfo) - errs := []error{} - if err := verifyContainerRestarts(f, pod, pod.Status.InitContainerStatuses, initCtrStatuses); err != nil { - errs = append(errs, err) - } - if err := verifyContainerRestarts(f, pod, pod.Status.ContainerStatuses, ctrStatuses); err != nil { - errs = append(errs, err) - } - - return utilerrors.NewAggregate(errs) -} - -func verifyContainerRestarts(f *framework.Framework, pod *v1.Pod, gotStatuses []v1.ContainerStatus, wantStatuses []v1.ContainerStatus) error { - ginkgo.GinkgoHelper() - - if len(gotStatuses) != len(wantStatuses) { - return fmt.Errorf("expectation length mismatch: got %d statuses, want %d", - len(gotStatuses), len(wantStatuses)) - } - - errs := []error{} - for i, gotStatus := range gotStatuses { - if gotStatus.RestartCount != wantStatuses[i].RestartCount { - errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", gotStatus.Name, gotStatus.RestartCount, wantStatuses[i].RestartCount)) - } else if gotStatus.RestartCount > 0 { - err := verifyOomScoreAdj(f, pod, gotStatus.Name) - if err != nil { - errs = append(errs, err) - } - } - } - return utilerrors.NewAggregate(errs) -} - -func verifyOomScoreAdj(f *framework.Framework, pod *v1.Pod, containerName string) error { - container := FindContainerInPod(pod, containerName) - if 
container == nil { - return fmt.Errorf("failed to find container %s in pod %s", containerName, pod.Name) - } - - node, err := f.ClientSet.CoreV1().Nodes().Get(context.Background(), pod.Spec.NodeName, metav1.GetOptions{}) - if err != nil { - return err - } - - nodeMemoryCapacity := node.Status.Capacity[v1.ResourceMemory] - oomScoreAdj := kubeqos.GetContainerOOMScoreAdjust(pod, container, int64(nodeMemoryCapacity.Value())) - expectedOomScoreAdj := strconv.FormatInt(int64(oomScoreAdj), 10) - - return VerifyOomScoreAdjValue(f, pod, container.Name, expectedOomScoreAdj) -} - -func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo) *v1.Pod { - ginkgo.GinkgoHelper() - // Wait for resize to complete. - - framework.ExpectNoError(framework.Gomega(). - Eventually(ctx, framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(pod.Namespace).Get, pod.Name, metav1.GetOptions{}))). - WithTimeout(f.Timeouts.PodStart). - Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) { - if helpers.IsPodResizeInfeasible(pod) { - // This is a terminal resize state - return func() string { - return "resize is infeasible" - }, nil - } - // TODO: Replace this check with a combination of checking the status.observedGeneration - // and the resize status when available. - if resourceErrs := VerifyPodStatusResources(pod, expectedContainers); resourceErrs != nil { - return func() string { - return fmt.Sprintf("container status resources don't match expected: %v", formatErrors(resourceErrs)) - }, nil - } - // Wait for kubelet to clear the resize status conditions. - for _, c := range pod.Status.Conditions { - if c.Type == v1.PodResizePending || c.Type == v1.PodResizeInProgress { - return func() string { - return fmt.Sprintf("resize status %v is still present in the pod status", c) - }, nil - } - } - // Wait for the pod to be ready. - if !podutils.IsPodReady(pod) { - return func() string { return "pod is not ready" }, nil - } - return nil, nil - })), - ) - - resizedPod, err := framework.GetObject(podClient.Get, pod.Name, metav1.GetOptions{})(ctx) - framework.ExpectNoError(err, "failed to get resized pod") - return resizedPod -} - -func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v1.Pod, expectedContainers []ResizableContainerInfo) { - ginkgo.GinkgoHelper() - - // Verify Pod Containers Cgroup Values - var errs []error - if cgroupErrs := VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers); cgroupErrs != nil { - errs = append(errs, fmt.Errorf("container cgroup values don't match expected: %w", formatErrors(cgroupErrs))) - } - if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil { - errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs))) - } - if restartErrs := verifyPodRestarts(f, resizedPod, expectedContainers); restartErrs != nil { - errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs))) - } - - // Verify Pod Resize conditions are empty. - for _, condition := range resizedPod.Status.Conditions { - if condition.Type == v1.PodResizeInProgress || condition.Type == v1.PodResizePending { - errs = append(errs, fmt.Errorf("unexpected resize condition type %s found in pod status", condition.Type)) - } - } - - if len(errs) > 0 { - resizedPod.ManagedFields = nil // Suppress managed fields in error output. 
- framework.ExpectNoError(formatErrors(utilerrors.NewAggregate(errs)), - "Verifying pod resources resize state. Pod: %s", framework.PrettyPrintJSON(resizedPod)) - } -} - -// ResizeContainerPatch generates a patch string to resize the pod container. -func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) { - var patch patchSpec - - for _, container := range containers { - var cPatch containerPatch - cPatch.Name = container.Name - cPatch.Resources.Requests.CPU = container.Resources.CPUReq - cPatch.Resources.Requests.Memory = container.Resources.MemReq - cPatch.Resources.Limits.CPU = container.Resources.CPULim - cPatch.Resources.Limits.Memory = container.Resources.MemLim - - patch.Spec.Containers = append(patch.Spec.Containers, cPatch) - } - - patchBytes, err := json.Marshal(patch) - if err != nil { - return "", err - } - - return string(patchBytes), nil -} - -// UpdateExpectedContainerRestarts updates the RestartCounts in expectedContainers by -// adding them to the existing RestartCounts in the containerStatuses of the provided pod. -// This reduces the flakiness of the RestartCount assertions by grabbing the current -// restart count right before the resize operation, and verify the expected increment (0 or 1) -// rather than the absolute count. -func UpdateExpectedContainerRestarts(ctx context.Context, pod *v1.Pod, expectedContainers []ResizableContainerInfo) []ResizableContainerInfo { - initialRestarts := make(map[string]int32) - newExpectedContainers := []ResizableContainerInfo{} - for _, ctr := range pod.Status.ContainerStatuses { - initialRestarts[ctr.Name] = ctr.RestartCount - } - for i, ctr := range expectedContainers { - newExpectedContainers = append(newExpectedContainers, expectedContainers[i]) - newExpectedContainers[i].RestartCount += initialRestarts[ctr.Name] - } - return newExpectedContainers -} - -func formatErrors(err error) error { - // Put each error on a new line for readability. - var agg utilerrors.Aggregate - if !errors.As(err, &agg) { - return err - } - - errStrings := make([]string, len(agg.Errors())) - for i, err := range agg.Errors() { - errStrings[i] = err.Error() - } - return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n")) -} diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go index 6dc039587e5..ca2b63a13ba 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go @@ -26,7 +26,6 @@ import ( "time" "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -43,22 +42,6 @@ import ( // have their logs fetched. const LabelLogOnPodFailure = "log-on-pod-failure" -// TODO: Move to its own subpkg. -// expectNoError checks if "err" is set, and if so, fails assertion while logging the error. -func expectNoError(err error, explain ...interface{}) { - expectNoErrorWithOffset(1, err, explain...) -} - -// TODO: Move to its own subpkg. -// expectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller -// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f"). 
-func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) { - if err != nil { - framework.Logf("Unexpected error occurred: %v", err) - } - gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...) -} - // PodsCreatedByLabel returns a created pod list matched by the given label. func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) { timeout := 2 * time.Minute @@ -364,9 +347,9 @@ func CreateExecPodOrFail(ctx context.Context, client clientset.Interface, ns, ge tweak(pod) } execPod, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) - expectNoError(err, "failed to create new exec pod in namespace: %s", ns) + framework.ExpectNoErrorWithOffset(1, err, "failed to create new exec pod in namespace: %s", ns) err = WaitForPodNameRunningInNamespace(ctx, client, execPod.Name, execPod.Namespace) - expectNoError(err, "failed to create new exec pod in namespace: %s", ns) + framework.ExpectNoErrorWithOffset(1, err, "failed to create new exec pod in namespace: %s", ns) return execPod } diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go index db7106e863e..b1fd25bed6d 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go @@ -18,20 +18,16 @@ package pod import ( "fmt" - "strconv" - "strings" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - kubecm "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" psaapi "k8s.io/pod-security-admission/api" psapolicy "k8s.io/pod-security-admission/policy" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // This command runs an infinite loop, sleeping for 1 second in each iteration. @@ -66,6 +62,9 @@ func GetDefaultTestImage() string { // due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35. // If the node OS is linux, return busybox image func GetDefaultTestImageID() imageutils.ImageID { + if framework.NodeOSDistroIs("windows") { + return GetTestImageID(imageutils.Agnhost) + } return GetTestImageID(imageutils.BusyBox) } @@ -96,7 +95,7 @@ func GetDefaultNonRootUser() *int64 { if framework.NodeOSDistroIs("windows") { return nil } - return pointer.Int64(DefaultNonRootUser) + return ptr.To[int64](DefaultNonRootUser) } // GeneratePodSecurityContext generates the corresponding pod security context with the given inputs @@ -123,11 +122,11 @@ func GenerateContainerSecurityContext(level psaapi.Level) *v1.SecurityContext { switch level { case psaapi.LevelBaseline: return &v1.SecurityContext{ - Privileged: pointer.Bool(false), + Privileged: ptr.To(false), } case psaapi.LevelPrivileged: return &v1.SecurityContext{ - Privileged: pointer.Bool(true), + Privileged: ptr.To(true), } case psaapi.LevelRestricted: return GetRestrictedContainerSecurityContext() @@ -158,14 +157,14 @@ const DefaultNonRootUserName = "ContainerUser" // Tests that require a specific user ID should override this. 
func GetRestrictedPodSecurityContext() *v1.PodSecurityContext { psc := &v1.PodSecurityContext{ - RunAsNonRoot: pointer.Bool(true), + RunAsNonRoot: ptr.To(true), RunAsUser: GetDefaultNonRootUser(), SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}, } if framework.NodeOSDistroIs("windows") { psc.WindowsOptions = &v1.WindowsSecurityContextOptions{} - psc.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName) + psc.WindowsOptions.RunAsUserName = ptr.To(DefaultNonRootUserName) } return psc @@ -174,7 +173,7 @@ func GetRestrictedPodSecurityContext() *v1.PodSecurityContext { // GetRestrictedContainerSecurityContext returns a minimal restricted container security context. func GetRestrictedContainerSecurityContext() *v1.SecurityContext { return &v1.SecurityContext{ - AllowPrivilegeEscalation: pointer.Bool(false), + AllowPrivilegeEscalation: ptr.To(false), Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}}, } } @@ -198,7 +197,7 @@ func MixinRestrictedPodSecurity(pod *v1.Pod) error { pod.Spec.SecurityContext = GetRestrictedPodSecurityContext() } else { if pod.Spec.SecurityContext.RunAsNonRoot == nil { - pod.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true) + pod.Spec.SecurityContext.RunAsNonRoot = ptr.To(true) } if pod.Spec.SecurityContext.RunAsUser == nil { pod.Spec.SecurityContext.RunAsUser = GetDefaultNonRootUser() @@ -208,7 +207,7 @@ func MixinRestrictedPodSecurity(pod *v1.Pod) error { } if framework.NodeOSDistroIs("windows") && pod.Spec.SecurityContext.WindowsOptions == nil { pod.Spec.SecurityContext.WindowsOptions = &v1.WindowsSecurityContextOptions{} - pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName) + pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = ptr.To(DefaultNonRootUserName) } } for i := range pod.Spec.Containers { @@ -238,7 +237,7 @@ func mixinRestrictedContainerSecurityContext(container *v1.Container) { container.SecurityContext = GetRestrictedContainerSecurityContext() } else { if container.SecurityContext.AllowPrivilegeEscalation == nil { - container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false) + container.SecurityContext.AllowPrivilegeEscalation = ptr.To(false) } if container.SecurityContext.Capabilities == nil { container.SecurityContext.Capabilities = &v1.Capabilities{} @@ -293,86 +292,3 @@ func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerSt } return nil } - -// VerifyCgroupValue verifies that the given cgroup path has the expected value in -// the specified container of the pod. It execs into the container to retrieve the -// cgroup value, and ensures that the retrieved cgroup value is equivalent to at -// least one of the values in expectedCgValues. 
-func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath string, expectedCgValues ...string) error { - cmd := fmt.Sprintf("head -n 1 %s", cgPath) - framework.Logf("Namespace %s Pod %s Container %s - looking for one of the expected cgroup values %s in path %s", - pod.Namespace, pod.Name, cName, expectedCgValues, cgPath) - cgValue, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd) - if err != nil { - return fmt.Errorf("failed to find one of the expected cgroup values %q in container cgroup %q", expectedCgValues, cgPath) - } - cgValue = strings.Trim(cgValue, "\n") - - if err := framework.Gomega().Expect(cgValue).To(gomega.BeElementOf(expectedCgValues)); err != nil { - return fmt.Errorf("value of cgroup %q for container %q should match one of the expectations: %w", cgPath, cName, err) - } - - return nil -} - -// VerifyOomScoreAdjValue verifies that oom_score_adj for pid 1 (pidof init/systemd -> app) -// has the expected value in specified container of the pod. It execs into the container, -// reads the oom_score_adj value from procfs, and compares it against the expected value. -func VerifyOomScoreAdjValue(f *framework.Framework, pod *v1.Pod, cName, expectedOomScoreAdj string) error { - cmd := "cat /proc/1/oom_score_adj" - framework.Logf("Namespace %s Pod %s Container %s - looking for oom_score_adj value %s", - pod.Namespace, pod.Name, cName, expectedOomScoreAdj) - oomScoreAdj, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd) - if err != nil { - return fmt.Errorf("failed to find expected value %s for container app process", expectedOomScoreAdj) - } - oomScoreAdj = strings.Trim(oomScoreAdj, "\n") - if oomScoreAdj != expectedOomScoreAdj { - return fmt.Errorf("oom_score_adj value %s not equal to expected %s", oomScoreAdj, expectedOomScoreAdj) - } - return nil -} - -// IsPodOnCgroupv2Node checks whether the pod is running on cgroupv2 node. -// TODO: Deduplicate this function with NPD cluster e2e test: -// https://github.com/kubernetes/kubernetes/blob/2049360379bcc5d6467769cef112e6e492d3d2f0/test/e2e/node/node_problem_detector.go#L369 -func IsPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool { - cmd := "mount -t cgroup2" - out, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd) - if err != nil { - return false - } - return len(out) != 0 -} - -// TODO: Remove the rounded cpu limit values when https://github.com/opencontainers/runc/issues/4622 -// is fixed. 
-func GetCPULimitCgroupExpectations(cpuLimit *resource.Quantity) []string { - var expectedCPULimits []string - milliCPULimit := cpuLimit.MilliValue() - - cpuQuota := kubecm.MilliCPUToQuota(milliCPULimit, kubecm.QuotaPeriod) - if cpuLimit.IsZero() { - cpuQuota = -1 - } - expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuota)) - - if milliCPULimit%10 != 0 && cpuQuota != -1 { - roundedCPULimit := (milliCPULimit/10 + 1) * 10 - cpuQuotaRounded := kubecm.MilliCPUToQuota(roundedCPULimit, kubecm.QuotaPeriod) - expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuotaRounded)) - } - - return expectedCPULimits -} - -func getExpectedCPULimitFromCPUQuota(cpuQuota int64) string { - expectedCPULimitString := strconv.FormatInt(cpuQuota, 10) - if *podOnCgroupv2Node { - if expectedCPULimitString == "-1" { - expectedCPULimitString = "max" - } - expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod) - } - return expectedCPULimitString -} diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go index abe4bfda133..bb700080068 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go @@ -28,6 +28,7 @@ import ( "os" "path" "path/filepath" + "slices" "sort" "strings" "time" @@ -613,6 +614,19 @@ func AfterReadingAllFlags(t *TestContextType) { } ginkgo.ReportAfterSuite("Kubernetes e2e JUnit report", func(report ginkgo.Report) { + // Sort specs by full name. The default is by start (or completion?) time, + // which is less useful in spyglass because those times are not shown + // and thus tests seem to be listed with no apparent order. + slices.SortFunc(report.SpecReports, func(a, b types.SpecReport) int { + res := strings.Compare(a.FullText(), b.FullText()) + if res == 0 { + // Use start time as tie-breaker in the unlikely + // case that two specs have the same full name. + return a.StartTime.Compare(b.StartTime) + } + return res + }) + // With Ginkgo v1, we used to write one file per // parallel node. Now Ginkgo v2 automatically merges // all results into a report for us. The 01 suffix is diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index 7eed8e36751..ba6a7c77d10 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -413,6 +413,7 @@ func CheckTestingNSDeletedExcept(ctx context.Context, c clientset.Interface, ski // WaitForServiceEndpointsNum waits until there are EndpointSlices for serviceName // containing a total of expectNum endpoints. (If the service is dual-stack, expectNum // must count the endpoints of both IP families.) +// Deprecated: use e2eendpointslice.WaitForEndpointCount or other related functions. 
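+//
+// A sketch of the migration; the helper name comes from the deprecation note
+// above, but its exact signature is assumed here, not verified:
+//
+//	import e2eendpointslice "k8s.io/kubernetes/test/e2e/framework/endpointslice"
+//
+//	err := e2eendpointslice.WaitForEndpointCount(ctx, c, namespace, serviceName, expectNum)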
func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go index 94c84310001..a0cdafe18a6 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go @@ -29,7 +29,6 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" - storagev1beta1 "k8s.io/api/storage/v1beta1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -316,7 +315,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace, PatchName(f, &item.Name) case *storagev1.StorageClass: PatchName(f, &item.Name) - case *storagev1beta1.VolumeAttributesClass: + case *storagev1.VolumeAttributesClass: PatchName(f, &item.Name) case *storagev1.CSIDriver: PatchName(f, &item.Name) @@ -625,16 +624,16 @@ func (*storageClassFactory) Create(ctx context.Context, f *framework.Framework, type volumeAttributesClassFactory struct{} func (f *volumeAttributesClassFactory) New() runtime.Object { - return &storagev1beta1.VolumeAttributesClass{} + return &storagev1.VolumeAttributesClass{} } func (*volumeAttributesClassFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) { - item, ok := i.(*storagev1beta1.VolumeAttributesClass) + item, ok := i.(*storagev1.VolumeAttributesClass) if !ok { return nil, errorItemNotSupported } - client := f.ClientSet.StorageV1beta1().VolumeAttributesClasses() + client := f.ClientSet.StorageV1().VolumeAttributesClasses() if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil { return nil, fmt.Errorf("create VolumeAttributesClass: %w", err) } diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/file.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/file.go new file mode 100644 index 00000000000..a6a91331904 --- /dev/null +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/file.go @@ -0,0 +1,41 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "hash/crc32" +) + +// The max length for ntfs, ext4, xfs and btrfs. +const maxFileNameLength = 255 + +// Shorten a file name to size allowed by the most common filesystems. +// If the filename is too long, cut it + add a short hash (crc32) that makes it unique. +// Note that the input should be a single file / directory name, not a path +// composed of several directories. 
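+//
+// Illustrative behavior, derived from the implementation below rather than
+// from a real test: a 300-character name is cut so that the truncated prefix,
+// a "-", and the crc32 checksum of the full name printed in hex add up to
+// exactly 255 characters, while any name already within the limit is
+// returned unchanged.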
+func ShortenFileName(filename string) string { + if len(filename) <= maxFileNameLength { + return filename + } + + hash := crc32.ChecksumIEEE([]byte(filename)) + hashString := fmt.Sprintf("%x", hash) + hashLen := len(hashString) + + return fmt.Sprintf("%s-%s", filename[:maxFileNameLength-1-hashLen], hashString) +} diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go b/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go index 603f86e2ce8..a4d72721fad 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go @@ -22,8 +22,8 @@ import ( "io" "os" "path" + "path/filepath" "regexp" - "strings" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -68,6 +68,12 @@ func StartPodLogs(ctx context.Context, f *framework.Framework, driverNamespace * testName = append(testName, reg.ReplaceAllString(test.LeafNodeText, "_")) } } + + // Make sure each directory name is short enough for Linux + Windows + for i, testNameComponent := range testName { + testName[i] = ShortenFileName(testNameComponent) + } + // We end the prefix with a slash to ensure that all logs // end up in a directory named after the current test. // @@ -76,7 +82,7 @@ func StartPodLogs(ctx context.Context, f *framework.Framework, driverNamespace * // keeps each directory name smaller (the full test // name at one point exceeded 256 characters, which was // too much for some filesystems). - logDir := framework.TestContext.ReportDir + "/" + strings.Join(testName, "/") + logDir := filepath.Join(framework.TestContext.ReportDir, filepath.Join(testName...)) to.LogPathPrefix = logDir + "/" err := os.MkdirAll(logDir, 0755) diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/gpu/gce/nvidia-driver-installer.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/gpu/gce/nvidia-driver-installer.yaml index c23670087c3..24be2839bd1 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/gpu/gce/nvidia-driver-installer.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/gpu/gce/nvidia-driver-installer.yaml @@ -142,6 +142,6 @@ spec: - name: nvidia-config mountPath: /etc/nvidia containers: - - image: "registry.k8s.io/pause:3.10" + - image: "registry.k8s.io/pause:3.10.1" name: pause diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml index 60a1a6fabaa..11fe740b9cb 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml @@ -17,7 +17,7 @@ spec: spec: containers: - name: master - image: docker.io/library/redis:5.0.5-alpine + image: registry.k8s.io/e2e-test-images/redis:5.0.5-alpine resources: requests: cpu: 100m diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml index 3376794b345..7f46fcbdd9d 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml @@ -17,7 +17,7 @@ spec: spec: containers: - name: slave - image: docker.io/library/redis:5.0.5-alpine + 
image: registry.k8s.io/e2e-test-images/redis:5.0.5-alpine # We are only implementing the dns option of: # https://github.com/kubernetes/examples/blob/97c7ed0eb6555a4b667d2877f965d392e00abc45/guestbook/redis-slave/run.sh command: [ "redis-server", "--slaveof", "redis-master", "6379" ] diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml index 9b54660865d..cd2916ea67f 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml @@ -8,7 +8,7 @@ metadata: spec: containers: - name: primary - image: registry.k8s.io/e2e-test-images/agnhost:2.32 + image: registry.k8s.io/e2e-test-images/agnhost:2.54 env: - name: PRIMARY value: "true" @@ -21,7 +21,7 @@ spec: - mountPath: /agnhost-primary-data name: data - name: sentinel - image: registry.k8s.io/e2e-test-images/agnhost:2.32 + image: registry.k8s.io/e2e-test-images/agnhost:2.54 env: - name: SENTINEL value: "true" diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/service.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/service.yaml deleted file mode 100644 index 5499b375c34..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: nginx - labels: - app: nginx -spec: - ports: - - port: 80 - name: web - clusterIP: None - selector: - app: nginx \ No newline at end of file diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml deleted file mode 100644 index abab8e89b3e..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: web -spec: - serviceName: "nginx" - replicas: 3 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: nginx - image: {{.NginxImageNew}} - ports: - - containerPort: 80 - name: web - volumeMounts: - - name: www - mountPath: /usr/share/nginx/html - volumeClaimTemplates: - - metadata: - name: www - annotations: - volume.beta.kubernetes.io/storage-class: nginx-sc - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml deleted file mode 100644 index 528ef6dad57..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# A headless service to create DNS records -apiVersion: v1 -kind: Service -metadata: - name: redis - labels: - app: redis -spec: - ports: - - port: 6379 - name: peer - # *.redis.default.svc.cluster.local - clusterIP: None - selector: - app: redis - publishNotReadyAddresses: true diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml deleted file mode 100644 index 84e0a82bc40..00000000000 
--- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/statefulset.yaml +++ /dev/null @@ -1,81 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: rd -spec: - serviceName: "redis" - replicas: 3 - selector: - matchLabels: - app: redis - template: - metadata: - labels: - app: redis - spec: - initContainers: - - name: install - image: registry.k8s.io/e2e-test-images/pets/redis-installer:1.5 - imagePullPolicy: Always - args: - - "--install-into=/opt" - - "--work-dir=/work-dir" - volumeMounts: - - name: opt - mountPath: "/opt" - - name: workdir - mountPath: "/work-dir" - - name: bootstrap - image: debian:jessie - command: - - "/work-dir/peer-finder" - args: - - -on-start="/work-dir/on-start.sh" - - "-service=redis" - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - volumeMounts: - - name: opt - mountPath: "/opt" - - name: workdir - mountPath: "/work-dir" - containers: - - name: redis - image: debian:jessie - ports: - - containerPort: 6379 - name: peer - command: - - /opt/redis/redis-server - args: - - /opt/redis/redis.conf - readinessProbe: - exec: - command: - - sh - - -c - - "/opt/redis/redis-cli -h $(hostname) ping" - initialDelaySeconds: 15 - timeoutSeconds: 5 - volumeMounts: - - name: datadir - mountPath: /data - - name: opt - mountPath: /opt - volumes: - - name: opt - emptyDir: {} - - name: workdir - emptyDir: {} - volumeClaimTemplates: - - metadata: - name: datadir - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/populator.storage.k8s.io_volumepopulators.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/populator.storage.k8s.io_volumepopulators.yaml index 98cd376a1cb..488853fbfd2 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/populator.storage.k8s.io_volumepopulators.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/populator.storage.k8s.io_volumepopulators.yaml @@ -4,7 +4,6 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.5.0 api-approved.kubernetes.io: https://github.com/kubernetes/enhancements/pull/2934 - creationTimestamp: null name: volumepopulators.populator.storage.k8s.io spec: group: populator.storage.k8s.io diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml index 666dd4e0f70..35dc8c4b29d 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml @@ -354,7 +354,7 @@ spec: name: socket-dir - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v8.3.0 args: - -v=5 - --csi-address=/csi/csi.sock diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh 
b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh index 0ff1f9cb584..f62b40d8483 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh @@ -266,6 +266,8 @@ run_tests() { export KUBE_CONTAINER_RUNTIME=remote export KUBE_CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock export KUBE_CONTAINER_RUNTIME_NAME=containerd + export SNAPSHOTTER_VERSION="${SNAPSHOTTER_VERSION:-v8.3.0}" + echo "SNAPSHOTTER_VERSION is $SNAPSHOTTER_VERSION" # ginkgo can take forever to exit, so we run it in the background and save the # PID, bash will not run traps while waiting on a process, but it will while # running a builtin like `wait`, saving the PID also allows us to forward the @@ -278,10 +280,10 @@ run_tests() { kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml || exit 1 kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml || exit 1 - kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1 - curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \ + kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/"${SNAPSHOTTER_VERSION}"/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1 + curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/"${SNAPSHOTTER_VERSION}"/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \ awk '/--leader-election=true/ {print; print " - \"--feature-gates=CSIVolumeGroupSnapshot=true\""; next}1' | \ -sed 's|image: registry.k8s.io/sig-storage/snapshot-controller:v8.0.1|image: registry.k8s.io/sig-storage/snapshot-controller:v8.2.0|' | \ +sed "s|image: registry.k8s.io/sig-storage/snapshot-controller:.*|image: registry.k8s.io/sig-storage/snapshot-controller:${SNAPSHOTTER_VERSION}|" | \ kubectl apply -f - || exit 1 diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml index 86d07389f1e..ed77430bf0c 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml @@ -21,7 +21,7 @@ spec: serviceAccountName: csi-gce-pd-controller-sa containers: - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v8.3.0 args: - "--v=5" - "--csi-address=/csi/csi.sock" diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml index 3671e05c185..a5af9814a80 100644 --- 
a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml @@ -354,7 +354,7 @@ spec: name: socket-dir - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v8.3.0 args: - -v=5 - --csi-address=/csi/csi.sock diff --git a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml index 2be6611d3e9..cf5bbba29ab 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml +++ b/e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v8.3.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" diff --git a/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index dae697066ba..412ae957eec 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -28,7 +28,7 @@ import ( "regexp" "strings" - yaml "sigs.k8s.io/yaml/goyaml.v2" + yaml "go.yaml.in/yaml/v2" ) // RegistryList holds public and private image registries @@ -208,8 +208,6 @@ const ( Pause // Perl image Perl - // Redis image - Redis // RegressionIssue74839 image RegressionIssue74839 // ResourceConsumer image @@ -222,14 +220,14 @@ const ( func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) { configs := map[ImageID]Config{} - configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.53"} + configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.56"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.29.2"} configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"} - configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.7"} - configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.21-0"} + configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.37.0-1"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.8"} + configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.6.4-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"} @@ -246,9 +244,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[Nonewprivs] = Config{list.PromoterE2eRegistry, "nonewprivs", "1.3"} configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.4"} // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go - configs[Pause] = Config{list.GcRegistry, "pause", "3.10"} + 
configs[Pause] = Config{list.GcRegistry, "pause", "3.10.1"} configs[Perl] = Config{list.PromoterE2eRegistry, "perl", "5.26"} - configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-3"} configs[RegressionIssue74839] = Config{list.PromoterE2eRegistry, "regression-issue-74839", "1.2"} configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.13"} configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.4"} diff --git a/e2e/vendor/k8s.io/kubernetes/test/utils/runners.go b/e2e/vendor/k8s.io/kubernetes/test/utils/runners.go index 29a66bf2fe7..c8b8bc94759 100644 --- a/e2e/vendor/k8s.io/kubernetes/test/utils/runners.go +++ b/e2e/vendor/k8s.io/kubernetes/test/utils/runners.go @@ -44,7 +44,7 @@ import ( scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/util/workqueue" imageutils "k8s.io/kubernetes/test/utils/image" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "k8s.io/klog/v2" ) @@ -119,11 +119,12 @@ type RCConfig struct { Timeout time.Duration PodStatusFile *os.File Replicas int - CpuRequest int64 // millicores - CpuLimit int64 // millicores - MemRequest int64 // bytes - MemLimit int64 // bytes - GpuLimit int64 // count + CPURequest int64 // millicores + CPULimit int64 // millicores + MemRequest int64 // bytes + MemLimit int64 // bytes + GpuLimit int64 // count + PodResources *v1.ResourceRequirements // Pod-level resources ReadinessProbe *v1.Probe DNSPolicy *v1.DNSPolicy PriorityClassName string @@ -302,7 +303,7 @@ func (config *DeploymentConfig) create() error { Name: config.Name, }, Spec: apps.DeploymentSpec{ - Replicas: pointer.Int32(int32(config.Replicas)), + Replicas: ptr.To[int32](int32(config.Replicas)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "name": config.Name, @@ -331,6 +332,10 @@ func (config *DeploymentConfig) create() error { }, } + if config.PodResources != nil { + deployment.Spec.Template.Spec.Resources = config.PodResources.DeepCopy() + } + if len(config.AdditionalContainers) > 0 { deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, config.AdditionalContainers...) } @@ -373,7 +378,7 @@ func (config *ReplicaSetConfig) create() error { Name: config.Name, }, Spec: apps.ReplicaSetSpec{ - Replicas: pointer.Int32(int32(config.Replicas)), + Replicas: ptr.To[int32](int32(config.Replicas)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "name": config.Name, @@ -402,6 +407,10 @@ func (config *ReplicaSetConfig) create() error { }, } + if config.PodResources != nil { + rs.Spec.Template.Spec.Resources = config.PodResources.DeepCopy() + } + if len(config.AdditionalContainers) > 0 { rs.Spec.Template.Spec.Containers = append(rs.Spec.Template.Spec.Containers, config.AdditionalContainers...) } @@ -445,7 +454,7 @@ func (config *RCConfig) create() error { Name: config.Name, }, Spec: v1.ReplicationControllerSpec{ - Replicas: pointer.Int32(int32(config.Replicas)), + Replicas: ptr.To[int32](int32(config.Replicas)), Selector: map[string]string{ "name": config.Name, }, @@ -478,6 +487,10 @@ func (config *RCConfig) create() error { }, } + if config.PodResources != nil { + rc.Spec.Template.Spec.Resources = config.PodResources.DeepCopy() + } + if len(config.AdditionalContainers) > 0 { rc.Spec.Template.Spec.Containers = append(rc.Spec.Template.Spec.Containers, config.AdditionalContainers...) 
} @@ -521,20 +534,20 @@ func (config *RCConfig) applyTo(template *v1.PodTemplateSpec) { c := &template.Spec.Containers[0] c.Ports = append(c.Ports, v1.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)}) } - if config.CpuLimit > 0 || config.MemLimit > 0 || config.GpuLimit > 0 { + if config.CPULimit > 0 || config.MemLimit > 0 || config.GpuLimit > 0 { template.Spec.Containers[0].Resources.Limits = v1.ResourceList{} } - if config.CpuLimit > 0 { - template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI) + if config.CPULimit > 0 { + template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CPULimit, resource.DecimalSI) } if config.MemLimit > 0 { template.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI) } - if config.CpuRequest > 0 || config.MemRequest > 0 { + if config.CPURequest > 0 || config.MemRequest > 0 { template.Spec.Containers[0].Resources.Requests = v1.ResourceList{} } - if config.CpuRequest > 0 { - template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI) + if config.CPURequest > 0 { + template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CPURequest, resource.DecimalSI) } if config.MemRequest > 0 { template.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI) diff --git a/e2e/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go b/e2e/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go deleted file mode 100644 index 6bf0ea8ce09..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go +++ /dev/null @@ -1,102 +0,0 @@ -package expansion - -import ( - "bytes" -) - -const ( - operator = '$' - referenceOpener = '(' - referenceCloser = ')' -) - -// syntaxWrap returns the input string wrapped by the expansion syntax. -func syntaxWrap(input string) string { - return string(operator) + string(referenceOpener) + input + string(referenceCloser) -} - -// MappingFuncFor returns a mapping function for use with Expand that -// implements the expansion semantics defined in the expansion spec; it -// returns the input string wrapped in the expansion syntax if no mapping -// for the input is found. -func MappingFuncFor(context ...map[string]string) func(string) string { - return func(input string) string { - for _, vars := range context { - val, ok := vars[input] - if ok { - return val - } - } - - return syntaxWrap(input) - } -} - -// Expand replaces variable references in the input string according to -// the expansion spec using the given mapping function to resolve the -// values of variables. 
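// Quick usage sketch for the runners.go changes above: the deprecated
// k8s.io/utils/pointer helpers are replaced by the generic k8s.io/utils/ptr
// package, and RCConfig gains an optional pod-level PodResources field.
// Illustrative only; the replica count and resource values are hypothetical.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

func main() {
	// Old style: pointer.Int32(3). New style: ptr.To works for any type.
	replicas := ptr.To[int32](3)

	// Pod-level resources, as wired through RCConfig.PodResources above and
	// copied into Template.Spec.Resources by the create() helpers.
	podResources := &v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
		},
	}

	fmt.Println(*replicas, podResources.Limits.Cpu())
}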
-func Expand(input string, mapping func(string) string) string { - var buf bytes.Buffer - checkpoint := 0 - for cursor := 0; cursor < len(input); cursor++ { - if input[cursor] == operator && cursor+1 < len(input) { - // Copy the portion of the input string since the last - // checkpoint into the buffer - buf.WriteString(input[checkpoint:cursor]) - - // Attempt to read the variable name as defined by the - // syntax from the input string - read, isVar, advance := tryReadVariableName(input[cursor+1:]) - - if isVar { - // We were able to read a variable name correctly; - // apply the mapping to the variable name and copy the - // bytes into the buffer - buf.WriteString(mapping(read)) - } else { - // Not a variable name; copy the read bytes into the buffer - buf.WriteString(read) - } - - // Advance the cursor in the input string to account for - // bytes consumed to read the variable name expression - cursor += advance - - // Advance the checkpoint in the input string - checkpoint = cursor + 1 - } - } - - // Return the buffer and any remaining unwritten bytes in the - // input string. - return buf.String() + input[checkpoint:] -} - -// tryReadVariableName attempts to read a variable name from the input -// string and returns the content read from the input, whether that content -// represents a variable name to perform mapping on, and the number of bytes -// consumed in the input string. -// -// The input string is assumed not to contain the initial operator. -func tryReadVariableName(input string) (string, bool, int) { - switch input[0] { - case operator: - // Escaped operator; return it. - return input[0:1], false, 1 - case referenceOpener: - // Scan to expression closer - for i := 1; i < len(input); i++ { - if input[i] == referenceCloser { - return input[1:i], true, i + 1 - } - } - - // Incomplete reference; return it. - return string(operator) + string(referenceOpener), false, 1 - default: - // Not the beginning of an expression, ie, an operator - // that doesn't begin an expression. Return the operator - // and the first rune in the string. - return (string(operator) + string(input[0])), false, 1 - } -} diff --git a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/LICENSE b/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/LICENSE deleted file mode 100644 index 27448585ad4..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
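// For reference, the $(VAR) expansion semantics of the helper deleted above.
// A sketch that assumes the package is still importable from the main
// kubernetes repo, which this e2e vendor tree no longer carries.
package main

import (
	"fmt"

	"k8s.io/kubernetes/third_party/forked/golang/expansion"
)

func main() {
	mapping := expansion.MappingFuncFor(map[string]string{"FOO": "foo"})

	// Known variables are substituted.
	fmt.Println(expansion.Expand("$(FOO)/bin", mapping)) // "foo/bin"
	// Unknown references pass through unchanged.
	fmt.Println(expansion.Expand("$(BAR)", mapping)) // "$(BAR)"
	// A doubled operator escapes the reference.
	fmt.Println(expansion.Expand("$$(FOO)", mapping)) // "$(FOO)"
}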
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/NOTICE b/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/NOTICE deleted file mode 100644 index 5c97abce4b9..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/NOTICE +++ /dev/null @@ -1,17 +0,0 @@ -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/apparmor/apparmor_linux.go b/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/apparmor/apparmor_linux.go deleted file mode 100644 index f83cbb225a3..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/apparmor/apparmor_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -package apparmor - -import ( - "os" - "sync" -) - -var ( - appArmorEnabled bool - checkAppArmor sync.Once -) - -// IsEnabled returns true if apparmor is enabled for the host. -func IsEnabled() bool { - checkAppArmor.Do(func() { - if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil { - buf, err := os.ReadFile("/sys/module/apparmor/parameters/enabled") - appArmorEnabled = err == nil && len(buf) > 1 && buf[0] == 'Y' - } - }) - return appArmorEnabled -} diff --git a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/apparmor/apparmor_unsupported.go b/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/apparmor/apparmor_unsupported.go deleted file mode 100644 index 28be1d2e875..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/apparmor/apparmor_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !linux -// +build !linux - -package apparmor - -func IsEnabled() bool { - return false -} diff --git a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/utils/utils.go b/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/utils/utils.go deleted file mode 100644 index 76a09333019..00000000000 --- a/e2e/vendor/k8s.io/kubernetes/third_party/forked/libcontainer/utils/utils.go +++ /dev/null @@ -1,35 +0,0 @@ -package utils - -import ( - "os" - "path/filepath" -) - -// CleanPath makes a path safe for use with filepath.Join. This is done by not -// only cleaning the path, but also (if the path is relative) adding a leading -// '/' and cleaning it (then removing the leading '/'). This ensures that a -// path resulting from prepending another path will always resolve to lexically -// be a subdirectory of the prefixed path. This is all done lexically, so paths -// that include symlinks won't be safe as a result of using CleanPath. -func CleanPath(path string) string { - // Deal with empty strings nicely. - if path == "" { - return "" - } - - // Ensure that all paths are cleaned (especially problematic ones like - // "/../../../../../" which can cause lots of issues). - path = filepath.Clean(path) - - // If the path isn't absolute, we need to do more processing to fix paths - // such as "../../../..//some/path". We also shouldn't convert absolute - // paths to relative ones. - if !filepath.IsAbs(path) { - path = filepath.Clean(string(os.PathSeparator) + path) - // This can't fail, as (by definition) all paths are relative to root. - path, _ = filepath.Rel(string(os.PathSeparator), path) - } - - // Clean the path again for good measure. - return filepath.Clean(path) -} diff --git a/e2e/vendor/k8s.io/mount-utils/README.md b/e2e/vendor/k8s.io/mount-utils/README.md index ee7c8e89af6..02a46dd89c3 100644 --- a/e2e/vendor/k8s.io/mount-utils/README.md +++ b/e2e/vendor/k8s.io/mount-utils/README.md @@ -1,3 +1,8 @@ +> ⚠️ **This is an automatically published [staged repository](https://git.k8s.io/kubernetes/staging#external-repository-staging-area) for Kubernetes**. +> Contributions, including issues and pull requests, should be made to the main Kubernetes repository: [https://github.com/kubernetes/kubernetes](https://github.com/kubernetes/kubernetes). 
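// The libcontainer utils.CleanPath helper deleted above makes a relative path
// safe to join under a prefix by cleaning it against "/" first. A standalone
// sketch of the same lexical trick (not the vendored code itself):
package main

import (
	"fmt"
	"path/filepath"
)

// cleanPath mirrors the behavior of the deleted helper: clean the path, and
// for relative paths, re-clean against the root so ".." cannot escape.
func cleanPath(path string) string {
	if path == "" {
		return ""
	}
	path = filepath.Clean(path)
	if !filepath.IsAbs(path) {
		path = filepath.Clean(string(filepath.Separator) + path)
		// This cannot fail: both arguments are rooted at the separator.
		path, _ = filepath.Rel(string(filepath.Separator), path)
	}
	return filepath.Clean(path)
}

func main() {
	fmt.Println(cleanPath("../../etc/passwd")) // "etc/passwd"
	fmt.Println(cleanPath("/a/../../b"))       // "/b"
}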
+> This repository is read-only for importing, and not used for direct contributions. +> See [CONTRIBUTING.md](./CONTRIBUTING.md) for more details. + ## Purpose This repository defines an interface to mounting filesystems to be consumed by diff --git a/e2e/vendor/k8s.io/mount-utils/mount.go b/e2e/vendor/k8s.io/mount-utils/mount.go index 0bd91ee617b..1fdae690329 100644 --- a/e2e/vendor/k8s.io/mount-utils/mount.go +++ b/e2e/vendor/k8s.io/mount-utils/mount.go @@ -100,7 +100,7 @@ type MounterForceUnmounter interface { } // MountPoint represents a single line in /proc/mounts or /etc/fstab. -type MountPoint struct { // nolint: golint +type MountPoint struct { Device string Path string Type string @@ -109,7 +109,7 @@ type MountPoint struct { // nolint: golint Pass int } -type MountErrorType string // nolint: golint +type MountErrorType string const ( FilesystemMismatch MountErrorType = "FilesystemMismatch" @@ -120,7 +120,7 @@ const ( UnknownMountError MountErrorType = "UnknownMountError" ) -type MountError struct { // nolint: golint +type MountError struct { Type MountErrorType Message string } diff --git a/e2e/vendor/k8s.io/mount-utils/mount_helper_unix.go b/e2e/vendor/k8s.io/mount-utils/mount_helper_unix.go index 1c603dca7a0..6c74eb9ca7c 100644 --- a/e2e/vendor/k8s.io/mount-utils/mount_helper_unix.go +++ b/e2e/vendor/k8s.io/mount-utils/mount_helper_unix.go @@ -71,7 +71,7 @@ func IsCorruptedMnt(err error) bool { } // MountInfo represents a single line in /proc//mountinfo. -type MountInfo struct { // nolint: golint +type MountInfo struct { // Unique ID for the mount (maybe reused after umount). ID int // The ID of the parent mount (or of self for the root of this mount namespace's mount tree). diff --git a/e2e/vendor/k8s.io/utils/buffer/ring_growing.go b/e2e/vendor/k8s.io/utils/buffer/ring_growing.go index 86965a51311..0f6d31d3e26 100644 --- a/e2e/vendor/k8s.io/utils/buffer/ring_growing.go +++ b/e2e/vendor/k8s.io/utils/buffer/ring_growing.go @@ -16,31 +16,57 @@ limitations under the License. package buffer +// defaultRingSize defines the default ring size if not specified +const defaultRingSize = 16 + +// RingGrowingOptions sets parameters for [RingGrowing] and +// [TypedRingGrowing]. +type RingGrowingOptions struct { + // InitialSize is the number of pre-allocated elements in the + // initial underlying storage buffer. + InitialSize int +} + // RingGrowing is a growing ring buffer. // Not thread safe. -type RingGrowing struct { - data []interface{} +// +// Deprecated: Use TypedRingGrowing[any] instead. +type RingGrowing = TypedRingGrowing[any] + +// NewRingGrowing constructs a new RingGrowing instance with provided parameters. +// +// Deprecated: Use NewTypedRingGrowing[any] instead. +func NewRingGrowing(initialSize int) *RingGrowing { + return NewTypedRingGrowing[any](RingGrowingOptions{InitialSize: initialSize}) +} + +// TypedRingGrowing is a growing ring buffer. +// The zero value has an initial size of 0 and is ready to use. +// Not thread safe. +type TypedRingGrowing[T any] struct { + data []T n int // Size of Data beg int // First available element readable int // Number of data items available } -// NewRingGrowing constructs a new RingGrowing instance with provided parameters. -func NewRingGrowing(initialSize int) *RingGrowing { - return &RingGrowing{ - data: make([]interface{}, initialSize), - n: initialSize, +// NewTypedRingGrowing constructs a new TypedRingGrowing instance with provided parameters. 
+func NewTypedRingGrowing[T any](opts RingGrowingOptions) *TypedRingGrowing[T] { + return &TypedRingGrowing[T]{ + data: make([]T, opts.InitialSize), + n: opts.InitialSize, } } // ReadOne reads (consumes) first item from the buffer if it is available, otherwise returns false. -func (r *RingGrowing) ReadOne() (data interface{}, ok bool) { +func (r *TypedRingGrowing[T]) ReadOne() (data T, ok bool) { if r.readable == 0 { - return nil, false + return } r.readable-- element := r.data[r.beg] - r.data[r.beg] = nil // Remove reference to the object to help GC + var zero T + r.data[r.beg] = zero // Remove reference to the object to help GC if r.beg == r.n-1 { // Was the last element r.beg = 0 @@ -51,11 +77,14 @@ func (r *RingGrowing) ReadOne() (data interface{}, ok bool) { } // WriteOne adds an item to the end of the buffer, growing it if it is full. -func (r *RingGrowing) WriteOne(data interface{}) { +func (r *TypedRingGrowing[T]) WriteOne(data T) { if r.readable == r.n { // Time to grow newN := r.n * 2 - newData := make([]interface{}, newN) + if newN == 0 { + newN = defaultRingSize + } + newData := make([]T, newN) to := r.beg + r.readable if to <= r.n { copy(newData, r.data[r.beg:to]) @@ -70,3 +99,72 @@ func (r *RingGrowing) WriteOne(data interface{}) { r.data[(r.readable+r.beg)%r.n] = data r.readable++ } + +// Len returns the number of items in the buffer. +func (r *TypedRingGrowing[T]) Len() int { + return r.readable +} + +// Cap returns the capacity of the buffer. +func (r *TypedRingGrowing[T]) Cap() int { + return r.n +} + +// RingOptions sets parameters for [Ring]. +type RingOptions struct { + // InitialSize is the number of pre-allocated elements in the + // initial underlying storage buffer. + InitialSize int + // NormalSize is the number of elements to allocate for new storage + // buffers once the Ring is consumed and + // can shrink again. + NormalSize int +} + +// Ring is a dynamically-sized ring buffer which can grow and shrink as-needed. +// The zero value has an initial size and normal size of 0 and is ready to use. +// Not thread safe. +type Ring[T any] struct { + growing TypedRingGrowing[T] + normalSize int // Limits the size of the buffer that is kept for reuse. Read-only. +} + +// NewRing constructs a new Ring instance with provided parameters. +func NewRing[T any](opts RingOptions) *Ring[T] { + return &Ring[T]{ + growing: *NewTypedRingGrowing[T](RingGrowingOptions{InitialSize: opts.InitialSize}), + normalSize: opts.NormalSize, + } +} + +// ReadOne reads (consumes) first item from the buffer if it is available, +// otherwise returns false. When the buffer has been totally consumed and has +// grown in size beyond its normal size, it shrinks down to its normal size again. +func (r *Ring[T]) ReadOne() (data T, ok bool) { + element, ok := r.growing.ReadOne() + + if r.growing.readable == 0 && r.growing.n > r.normalSize { + // The buffer is empty. Reallocate a new buffer so the old one can be + // garbage collected. + r.growing.data = make([]T, r.normalSize) + r.growing.n = r.normalSize + r.growing.beg = 0 + } + + return element, ok +} + +// WriteOne adds an item to the end of the buffer, growing it if it is full. +func (r *Ring[T]) WriteOne(data T) { + r.growing.WriteOne(data) +} + +// Len returns the number of items in the buffer. +func (r *Ring[T]) Len() int { + return r.growing.Len() +} + +// Cap returns the capacity of the buffer. 
+func (r *Ring[T]) Cap() int { + return r.growing.Cap() +} diff --git a/e2e/vendor/k8s.io/utils/cpuset/OWNERS b/e2e/vendor/k8s.io/utils/cpuset/OWNERS deleted file mode 100644 index 0ec2b085272..00000000000 --- a/e2e/vendor/k8s.io/utils/cpuset/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - dchen1107 - - derekwaynecarr - - ffromani - - klueska - - SergeyKanzhelev diff --git a/e2e/vendor/k8s.io/utils/cpuset/cpuset.go b/e2e/vendor/k8s.io/utils/cpuset/cpuset.go deleted file mode 100644 index 52912d95bcd..00000000000 --- a/e2e/vendor/k8s.io/utils/cpuset/cpuset.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cpuset represents a collection of CPUs in a 'set' data structure. -// -// It can be used to represent core IDs, hyper thread siblings, CPU nodes, or processor IDs. -// -// The only special thing about this package is that -// methods are provided to convert back and forth from Linux 'list' syntax. -// See http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS for details. -// -// Future work can migrate this to use a 'set' library, and relax the dubious 'immutable' property. -// -// This package was originally developed in the 'kubernetes' repository. -package cpuset - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// CPUSet is a thread-safe, immutable set-like data structure for CPU IDs. -type CPUSet struct { - elems map[int]struct{} -} - -// New returns a new CPUSet containing the supplied elements. -func New(cpus ...int) CPUSet { - s := CPUSet{ - elems: map[int]struct{}{}, - } - for _, c := range cpus { - s.add(c) - } - return s -} - -// add adds the supplied elements to the CPUSet. -// It is intended for internal use only, since it mutates the CPUSet. -func (s CPUSet) add(elems ...int) { - for _, elem := range elems { - s.elems[elem] = struct{}{} - } -} - -// Size returns the number of elements in this set. -func (s CPUSet) Size() int { - return len(s.elems) -} - -// IsEmpty returns true if there are zero elements in this set. -func (s CPUSet) IsEmpty() bool { - return s.Size() == 0 -} - -// Contains returns true if the supplied element is present in this set. -func (s CPUSet) Contains(cpu int) bool { - _, found := s.elems[cpu] - return found -} - -// Equals returns true if the supplied set contains exactly the same elements -// as this set (s IsSubsetOf s2 and s2 IsSubsetOf s). -func (s CPUSet) Equals(s2 CPUSet) bool { - return reflect.DeepEqual(s.elems, s2.elems) -} - -// filter returns a new CPU set that contains all of the elements from this -// set that match the supplied predicate, without mutating the source set. 
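// Usage sketch for the generic ring buffer introduced in the ring_growing.go
// hunk above (values are illustrative):
package main

import (
	"fmt"

	"k8s.io/utils/buffer"
)

func main() {
	// The zero value of TypedRingGrowing starts at capacity 0 and grows to
	// the default size (16) on the first write.
	var g buffer.TypedRingGrowing[int]
	g.WriteOne(42)
	fmt.Println(g.Len(), g.Cap()) // 1 16

	v, ok := g.ReadOne()
	fmt.Println(v, ok) // 42 true

	// Ring reallocates down to NormalSize once drained, if it had grown
	// beyond it in the meantime.
	r := buffer.NewRing[string](buffer.RingOptions{InitialSize: 4, NormalSize: 4})
	r.WriteOne("a")
	s, _ := r.ReadOne()
	fmt.Println(s, r.Cap()) // a 4
}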
-func (s CPUSet) filter(predicate func(int) bool) CPUSet { - r := New() - for cpu := range s.elems { - if predicate(cpu) { - r.add(cpu) - } - } - return r -} - -// IsSubsetOf returns true if the supplied set contains all the elements -func (s CPUSet) IsSubsetOf(s2 CPUSet) bool { - result := true - for cpu := range s.elems { - if !s2.Contains(cpu) { - result = false - break - } - } - return result -} - -// Union returns a new CPU set that contains all of the elements from this -// set and all of the elements from the supplied sets, without mutating -// either source set. -func (s CPUSet) Union(s2 ...CPUSet) CPUSet { - r := New() - for cpu := range s.elems { - r.add(cpu) - } - for _, cs := range s2 { - for cpu := range cs.elems { - r.add(cpu) - } - } - return r -} - -// Intersection returns a new CPU set that contains all of the elements -// that are present in both this set and the supplied set, without mutating -// either source set. -func (s CPUSet) Intersection(s2 CPUSet) CPUSet { - return s.filter(func(cpu int) bool { return s2.Contains(cpu) }) -} - -// Difference returns a new CPU set that contains all of the elements that -// are present in this set and not the supplied set, without mutating either -// source set. -func (s CPUSet) Difference(s2 CPUSet) CPUSet { - return s.filter(func(cpu int) bool { return !s2.Contains(cpu) }) -} - -// List returns a slice of integers that contains all elements from -// this set. The list is sorted. -func (s CPUSet) List() []int { - result := s.UnsortedList() - sort.Ints(result) - return result -} - -// UnsortedList returns a slice of integers that contains all elements from -// this set. -func (s CPUSet) UnsortedList() []int { - result := make([]int, 0, len(s.elems)) - for cpu := range s.elems { - result = append(result, cpu) - } - return result -} - -// String returns a new string representation of the elements in this CPU set -// in canonical linux CPU list format. -// -// See: http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS -func (s CPUSet) String() string { - if s.IsEmpty() { - return "" - } - - elems := s.List() - - type rng struct { - start int - end int - } - - ranges := []rng{{elems[0], elems[0]}} - - for i := 1; i < len(elems); i++ { - lastRange := &ranges[len(ranges)-1] - // if this element is adjacent to the high end of the last range - if elems[i] == lastRange.end+1 { - // then extend the last range to include this element - lastRange.end = elems[i] - continue - } - // otherwise, start a new range beginning with this element - ranges = append(ranges, rng{elems[i], elems[i]}) - } - - // construct string from ranges - var result bytes.Buffer - for _, r := range ranges { - if r.start == r.end { - result.WriteString(strconv.Itoa(r.start)) - } else { - result.WriteString(fmt.Sprintf("%d-%d", r.start, r.end)) - } - result.WriteString(",") - } - return strings.TrimRight(result.String(), ",") -} - -// Parse CPUSet constructs a new CPU set from a Linux CPU list formatted string. -// -// See: http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS -func Parse(s string) (CPUSet, error) { - // Handle empty string. - if s == "" { - return New(), nil - } - - result := New() - - // Split CPU list string: - // "0-5,34,46-48" => ["0-5", "34", "46-48"] - ranges := strings.Split(s, ",") - - for _, r := range ranges { - boundaries := strings.SplitN(r, "-", 2) - if len(boundaries) == 1 { - // Handle ranges that consist of only one element like "34". 
- elem, err := strconv.Atoi(boundaries[0]) - if err != nil { - return New(), err - } - result.add(elem) - } else if len(boundaries) == 2 { - // Handle multi-element ranges like "0-5". - start, err := strconv.Atoi(boundaries[0]) - if err != nil { - return New(), err - } - end, err := strconv.Atoi(boundaries[1]) - if err != nil { - return New(), err - } - if start > end { - return New(), fmt.Errorf("invalid range %q (%d > %d)", r, start, end) - } - // start == end is acceptable (1-1 -> 1) - - // Add all elements to the result. - // e.g. "0-5", "46-48" => [0, 1, 2, 3, 4, 5, 46, 47, 48]. - for e := start; e <= end; e++ { - result.add(e) - } - } - } - return result, nil -} - -// Clone returns a copy of this CPU set. -func (s CPUSet) Clone() CPUSet { - r := New() - for elem := range s.elems { - r.add(elem) - } - return r -} diff --git a/e2e/vendor/k8s.io/utils/inotify/LICENSE b/e2e/vendor/k8s.io/utils/inotify/LICENSE deleted file mode 100644 index 6a66aea5eaf..00000000000 --- a/e2e/vendor/k8s.io/utils/inotify/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/e2e/vendor/k8s.io/utils/inotify/PATENTS b/e2e/vendor/k8s.io/utils/inotify/PATENTS deleted file mode 100644 index 733099041f8..00000000000 --- a/e2e/vendor/k8s.io/utils/inotify/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. 
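// The vendored copy of k8s.io/utils/cpuset is dropped above; for reference,
// its Linux cpu-list round-trip works like this (sketch, assuming the package
// is still pulled from k8s.io/utils upstream):
package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	s, err := cpuset.Parse("0-2,5")
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Size(), s.Contains(5)) // 4 true

	// String() renders adjacent IDs back into ranges.
	fmt.Println(cpuset.New(0, 1, 2, 5).String()) // "0-2,5"
}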
This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/e2e/vendor/k8s.io/utils/inotify/README.md b/e2e/vendor/k8s.io/utils/inotify/README.md deleted file mode 100644 index fbaa1667a70..00000000000 --- a/e2e/vendor/k8s.io/utils/inotify/README.md +++ /dev/null @@ -1,5 +0,0 @@ -This is a fork of golang.org/x/exp/inotify before it was deleted. - -Please use gopkg.in/fsnotify.v1 instead. - -For updates, see: https://github.com/fsnotify/fsnotify diff --git a/e2e/vendor/k8s.io/utils/inotify/inotify.go b/e2e/vendor/k8s.io/utils/inotify/inotify.go deleted file mode 100644 index 6e997042412..00000000000 --- a/e2e/vendor/k8s.io/utils/inotify/inotify.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package inotify // import "k8s.io/utils/inotify" - -import ( - "sync" -) - -// Event represents a notification -type Event struct { - Mask uint32 // Mask of events - Cookie uint32 // Unique cookie associating related events (for rename(2)) - Name string // File name (optional) -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// Watcher represents an inotify instance -type Watcher struct { - mu sync.Mutex - fd int // File descriptor (as returned by the inotify_init() syscall) - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - Error chan error // Errors are sent on this channel - Event chan *Event // Events are returned on this channel - done chan bool // Channel for sending a "quit message" to the reader goroutine - isClosed bool // Set to true when Close() is first called -} diff --git a/e2e/vendor/k8s.io/utils/inotify/inotify_linux.go b/e2e/vendor/k8s.io/utils/inotify/inotify_linux.go deleted file mode 100644 index 2b9eabbdc7d..00000000000 --- a/e2e/vendor/k8s.io/utils/inotify/inotify_linux.go +++ /dev/null @@ -1,315 +0,0 @@ -//go:build linux -// +build linux - -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package inotify implements a wrapper for the Linux inotify system. 
- -Example: - - watcher, err := inotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - err = watcher.Watch("/tmp") - if err != nil { - log.Fatal(err) - } - for { - select { - case ev := <-watcher.Event: - log.Println("event:", ev) - case err := <-watcher.Error: - log.Println("error:", err) - } - } -*/ -package inotify // import "k8s.io/utils/inotify" - -import ( - "errors" - "fmt" - "os" - "strings" - "syscall" - "unsafe" -) - -// NewWatcher creates and returns a new inotify instance using inotify_init(2) -func NewWatcher() (*Watcher, error) { - fd, errno := syscall.InotifyInit1(syscall.IN_CLOEXEC) - if fd == -1 { - return nil, os.NewSyscallError("inotify_init", errno) - } - w := &Watcher{ - fd: fd, - watches: make(map[string]*watch), - paths: make(map[int]string), - Event: make(chan *Event), - Error: make(chan error), - done: make(chan bool, 1), - } - - go w.readEvents() - return w, nil -} - -// Close closes an inotify watcher instance -// It sends a message to the reader goroutine to quit and removes all watches -// associated with the inotify instance -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - w.done <- true - for path := range w.watches { - w.RemoveWatch(path) - } - - return nil -} - -// AddWatch adds path to the watched file set. -// The flags are interpreted as described in inotify_add_watch(2). -func (w *Watcher) AddWatch(path string, flags uint32) error { - if w.isClosed { - return errors.New("inotify instance already closed") - } - - watchEntry, found := w.watches[path] - if found { - watchEntry.flags |= flags - flags |= syscall.IN_MASK_ADD - } - - w.mu.Lock() // synchronize with readEvents goroutine - - wd, err := syscall.InotifyAddWatch(w.fd, path, flags) - if err != nil { - w.mu.Unlock() - return &os.PathError{ - Op: "inotify_add_watch", - Path: path, - Err: err, - } - } - - if !found { - w.watches[path] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = path - } - w.mu.Unlock() - return nil -} - -// Watch adds path to the watched file set, watching all events. -func (w *Watcher) Watch(path string) error { - return w.AddWatch(path, InAllEvents) -} - -// RemoveWatch removes path from the watched file set. -func (w *Watcher) RemoveWatch(path string) error { - watch, ok := w.watches[path] - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", path) - } - success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // when file descriptor or watch descriptor not found, InotifyRmWatch syscall return EINVAL error - // if return error, it may lead this path remain in watches and paths map, and no other event can trigger remove action. - if errno != syscall.EINVAL { - return os.NewSyscallError("inotify_rm_watch", errno) - } - } - delete(w.watches, path) - // Locking here to protect the read from paths in readEvents. - w.mu.Lock() - delete(w.paths, int(watch.wd)) - w.mu.Unlock() - return nil -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Event channel -func (w *Watcher) readEvents() { - var buf [syscall.SizeofInotifyEvent * 4096]byte - - for { - n, err := syscall.Read(w.fd, buf[:]) - // See if there is a message on the "done" channel - var done bool - select { - case done = <-w.done: - default: - } - - // If EOF or a "done" message is received - if n == 0 || done { - // The syscall.Close can be slow. Close - // w.Event first. 
- close(w.Event) - err := syscall.Close(w.fd) - if err != nil { - w.Error <- os.NewSyscallError("close", err) - } - close(w.Error) - return - } - if n < 0 { - w.Error <- os.NewSyscallError("read", err) - continue - } - if n < syscall.SizeofInotifyEvent { - w.Error <- errors.New("inotify: short read in readEvents()") - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-syscall.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) - event := new(Event) - event.Mask = uint32(raw.Mask) - event.Cookie = uint32(raw.Cookie) - nameLen := uint32(raw.Len) - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - w.mu.Unlock() - if ok { - event.Name = name - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) - // The filename is padded with NUL bytes. TrimRight() gets rid of those. - event.Name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - // Send the event on the events channel - w.Event <- event - } - // Move to the next event in the buffer - offset += syscall.SizeofInotifyEvent + nameLen - } - } -} - -// String formats the event e in the form -// "filename: 0xEventMask = IN_ACCESS|IN_ATTRIB_|..." -func (e *Event) String() string { - var events string - - m := e.Mask - for _, b := range eventBits { - if m&b.Value == b.Value { - m &^= b.Value - events += "|" + b.Name - } - } - - if m != 0 { - events += fmt.Sprintf("|%#x", m) - } - if len(events) > 0 { - events = " == " + events[1:] - } - - return fmt.Sprintf("%q: %#x%s", e.Name, e.Mask, events) -} - -const ( - // Options for inotify_init() are not exported - // IN_CLOEXEC uint32 = syscall.IN_CLOEXEC - // IN_NONBLOCK uint32 = syscall.IN_NONBLOCK - - // Options for AddWatch - - // InDontFollow : Don't dereference pathname if it is a symbolic link - InDontFollow uint32 = syscall.IN_DONT_FOLLOW - // InOneshot : Monitor the filesystem object corresponding to pathname for one event, then remove from watch list - InOneshot uint32 = syscall.IN_ONESHOT - // InOnlydir : Watch pathname only if it is a directory - InOnlydir uint32 = syscall.IN_ONLYDIR - - // The "IN_MASK_ADD" option is not exported, as AddWatch - // adds it automatically, if there is already a watch for the given path - // IN_MASK_ADD uint32 = syscall.IN_MASK_ADD - - // Events - - // InAccess : File was accessed - InAccess uint32 = syscall.IN_ACCESS - // InAllEvents : Bit mask for all notify events - InAllEvents uint32 = syscall.IN_ALL_EVENTS - // InAttrib : Metadata changed - InAttrib uint32 = syscall.IN_ATTRIB - // InClose : Equates to IN_CLOSE_WRITE | IN_CLOSE_NOWRITE - InClose uint32 = syscall.IN_CLOSE - // InCloseNowrite : File or directory not opened for writing was closed - InCloseNowrite uint32 = syscall.IN_CLOSE_NOWRITE - // InCloseWrite : File opened for writing was closed - InCloseWrite uint32 = syscall.IN_CLOSE_WRITE - // InCreate : File/directory created in watched directory - InCreate uint32 = syscall.IN_CREATE - // InDelete : File/directory deleted from watched directory - InDelete 
uint32 = syscall.IN_DELETE - // InDeleteSelf : Watched file/directory was itself deleted - InDeleteSelf uint32 = syscall.IN_DELETE_SELF - // InModify : File was modified - InModify uint32 = syscall.IN_MODIFY - // InMove : Equates to IN_MOVED_FROM | IN_MOVED_TO - InMove uint32 = syscall.IN_MOVE - // InMovedFrom : Generated for the directory containing the old filename when a file is renamed - InMovedFrom uint32 = syscall.IN_MOVED_FROM - // InMovedTo : Generated for the directory containing the new filename when a file is renamed - InMovedTo uint32 = syscall.IN_MOVED_TO - // InMoveSelf : Watched file/directory was itself moved - InMoveSelf uint32 = syscall.IN_MOVE_SELF - // InOpen : File or directory was opened - InOpen uint32 = syscall.IN_OPEN - - // Special events - - // InIsdir : Subject of this event is a directory - InIsdir uint32 = syscall.IN_ISDIR - // InIgnored : Watch was removed explicitly or automatically - InIgnored uint32 = syscall.IN_IGNORED - // InQOverflow : Event queue overflowed - InQOverflow uint32 = syscall.IN_Q_OVERFLOW - // InUnmount : Filesystem containing watched object was unmounted - InUnmount uint32 = syscall.IN_UNMOUNT -) - -var eventBits = []struct { - Value uint32 - Name string -}{ - {InAccess, "IN_ACCESS"}, - {InAttrib, "IN_ATTRIB"}, - {InClose, "IN_CLOSE"}, - {InCloseNowrite, "IN_CLOSE_NOWRITE"}, - {InCloseWrite, "IN_CLOSE_WRITE"}, - {InCreate, "IN_CREATE"}, - {InDelete, "IN_DELETE"}, - {InDeleteSelf, "IN_DELETE_SELF"}, - {InModify, "IN_MODIFY"}, - {InMove, "IN_MOVE"}, - {InMovedFrom, "IN_MOVED_FROM"}, - {InMovedTo, "IN_MOVED_TO"}, - {InMoveSelf, "IN_MOVE_SELF"}, - {InOpen, "IN_OPEN"}, - {InIsdir, "IN_ISDIR"}, - {InIgnored, "IN_IGNORED"}, - {InQOverflow, "IN_Q_OVERFLOW"}, - {InUnmount, "IN_UNMOUNT"}, -} diff --git a/e2e/vendor/k8s.io/utils/inotify/inotify_others.go b/e2e/vendor/k8s.io/utils/inotify/inotify_others.go deleted file mode 100644 index 7297644040d..00000000000 --- a/e2e/vendor/k8s.io/utils/inotify/inotify_others.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !linux - -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package inotify // import "k8s.io/utils/inotify" - -import ( - "fmt" - "runtime" -) - -var errNotSupported = fmt.Errorf("watch not supported on %s", runtime.GOOS) - -// NewWatcher creates and returns a new inotify instance using inotify_init(2) -func NewWatcher() (*Watcher, error) { - return nil, errNotSupported -} - -// Close closes an inotify watcher instance -// It sends a message to the reader goroutine to quit and removes all watches -// associated with the inotify instance -func (w *Watcher) Close() error { - return errNotSupported -} - -// AddWatch adds path to the watched file set. -// The flags are interpreted as described in inotify_add_watch(2). -func (w *Watcher) AddWatch(path string, flags uint32) error { - return errNotSupported -} - -// Watch adds path to the watched file set, watching all events. 
-func (w *Watcher) Watch(path string) error { - return errNotSupported -} - -// RemoveWatch removes path from the watched file set. -func (w *Watcher) RemoveWatch(path string) error { - return errNotSupported -} diff --git a/e2e/vendor/modules.txt b/e2e/vendor/modules.txt index f92d4b9e156..4cb04906047 100644 --- a/e2e/vendor/modules.txt +++ b/e2e/vendor/modules.txt @@ -1,19 +1,9 @@ # cel.dev/expr v0.24.0 ## explicit; go 1.22.0 cel.dev/expr -# github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab -## explicit -github.com/JeffAshton/win_pdh # github.com/Masterminds/semver/v3 v3.4.0 ## explicit; go 1.21 github.com/Masterminds/semver/v3 -# github.com/Microsoft/go-winio v0.6.2 -## explicit; go 1.21 -github.com/Microsoft/go-winio -github.com/Microsoft/go-winio/internal/fs -github.com/Microsoft/go-winio/internal/socket -github.com/Microsoft/go-winio/internal/stringbuffer -github.com/Microsoft/go-winio/pkg/guid # github.com/antlr4-go/antlr/v4 v4.13.0 ## explicit; go 1.20 github.com/antlr4-go/antlr/v4 @@ -40,60 +30,21 @@ github.com/ceph/ceph-csi/api/deploy/kubernetes # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/containerd/containerd/api v1.8.0 -## explicit; go 1.21 -github.com/containerd/containerd/api/services/containers/v1 -github.com/containerd/containerd/api/services/tasks/v1 -github.com/containerd/containerd/api/services/version/v1 -github.com/containerd/containerd/api/types -github.com/containerd/containerd/api/types/task -# github.com/containerd/errdefs v1.0.0 -## explicit; go 1.20 -github.com/containerd/errdefs -# github.com/containerd/errdefs/pkg v0.3.0 -## explicit; go 1.22 -github.com/containerd/errdefs/pkg/errgrpc -github.com/containerd/errdefs/pkg/internal/cause -github.com/containerd/errdefs/pkg/internal/types -# github.com/containerd/log v0.1.0 -## explicit; go 1.20 -github.com/containerd/log -# github.com/containerd/ttrpc v1.2.6 -## explicit; go 1.19 -github.com/containerd/ttrpc -# github.com/containerd/typeurl/v2 v2.2.2 -## explicit; go 1.21 -github.com/containerd/typeurl/v2 -# github.com/coreos/go-systemd/v22 v22.5.0 -## explicit; go 1.12 -github.com/coreos/go-systemd/v22/dbus -# github.com/cyphar/filepath-securejoin v0.4.1 -## explicit; go 1.18 -github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/go-units v0.5.0 -## explicit -github.com/docker/go-units -# github.com/emicklei/go-restful/v3 v3.12.1 +# github.com/emicklei/go-restful/v3 v3.12.2 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log -# github.com/euank/go-kmsg-parser v2.0.0+incompatible -## explicit -github.com/euank/go-kmsg-parser/kmsgparser # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.7.0 -## explicit; go 1.17 -github.com/fsnotify/fsnotify -# github.com/fxamacker/cbor/v2 v2.7.0 -## explicit; go 1.17 +# github.com/fxamacker/cbor/v2 v2.9.0 +## explicit; go 1.20 github.com/fxamacker/cbor/v2 # github.com/go-logr/logr v1.4.3 ## explicit; go 1.18 @@ -115,57 +66,12 @@ github.com/go-openapi/swag # github.com/go-task/slim-sprig/v3 v3.0.0 ## explicit; go 1.20 github.com/go-task/slim-sprig/v3 -# github.com/godbus/dbus/v5 v5.1.0 -## explicit; go 1.12 -github.com/godbus/dbus/v5 # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 
-github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto -github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys -# github.com/google/cadvisor v0.52.1 -## explicit; go 1.23.0 -github.com/google/cadvisor/cache/memory -github.com/google/cadvisor/collector -github.com/google/cadvisor/container -github.com/google/cadvisor/container/common -github.com/google/cadvisor/container/containerd -github.com/google/cadvisor/container/containerd/containers -github.com/google/cadvisor/container/containerd/identifiers -github.com/google/cadvisor/container/containerd/install -github.com/google/cadvisor/container/containerd/namespaces -github.com/google/cadvisor/container/containerd/pkg/dialer -github.com/google/cadvisor/container/crio -github.com/google/cadvisor/container/crio/install -github.com/google/cadvisor/container/libcontainer -github.com/google/cadvisor/container/raw -github.com/google/cadvisor/container/systemd -github.com/google/cadvisor/container/systemd/install -github.com/google/cadvisor/devicemapper -github.com/google/cadvisor/events -github.com/google/cadvisor/fs -github.com/google/cadvisor/info/v1 -github.com/google/cadvisor/info/v2 -github.com/google/cadvisor/machine -github.com/google/cadvisor/manager -github.com/google/cadvisor/nvm -github.com/google/cadvisor/perf -github.com/google/cadvisor/resctrl -github.com/google/cadvisor/stats -github.com/google/cadvisor/storage -github.com/google/cadvisor/summary -github.com/google/cadvisor/utils -github.com/google/cadvisor/utils/cloudinfo -github.com/google/cadvisor/utils/cpuload -github.com/google/cadvisor/utils/cpuload/netlink -github.com/google/cadvisor/utils/oomparser -github.com/google/cadvisor/utils/sysfs -github.com/google/cadvisor/utils/sysinfo -github.com/google/cadvisor/version -github.com/google/cadvisor/watcher -# github.com/google/cel-go v0.23.2 -## explicit; go 1.21.1 +# github.com/google/cel-go v0.26.0 +## explicit; go 1.22.0 github.com/google/cel-go/cel github.com/google/cel-go/checker github.com/google/cel-go/checker/decls @@ -174,6 +80,7 @@ github.com/google/cel-go/common/ast github.com/google/cel-go/common/containers github.com/google/cel-go/common/debug github.com/google/cel-go/common/decls +github.com/google/cel-go/common/env github.com/google/cel-go/common/functions github.com/google/cel-go/common/operators github.com/google/cel-go/common/overloads @@ -188,8 +95,8 @@ github.com/google/cel-go/interpreter github.com/google/cel-go/interpreter/functions github.com/google/cel-go/parser github.com/google/cel-go/parser/gen -# github.com/google/gnostic-models v0.6.9 -## explicit; go 1.21 +# github.com/google/gnostic-models v0.7.0 +## explicit; go 1.22 github.com/google/gnostic-models/compiler github.com/google/gnostic-models/extensions github.com/google/gnostic-models/jsonschema @@ -198,7 +105,6 @@ github.com/google/gnostic-models/openapiv3 # github.com/google/go-cmp v0.7.0 ## explicit; go 1.21 github.com/google/go-cmp/cmp -github.com/google/go-cmp/cmp/cmpopts github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function @@ -212,8 +118,8 @@ github.com/google/uuid # github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 ## explicit; go 1.20 github.com/gorilla/websocket -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 -## explicit; go 1.22.7 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 +## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule 
github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -226,9 +132,6 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/karrick/godirwalk v1.17.0 -## explicit; go 1.13 -github.com/karrick/godirwalk # github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 ## explicit; go 1.22.0 github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1 @@ -244,9 +147,6 @@ github.com/kylelemons/godebug/diff github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible -## explicit -github.com/mistifyio/go-zfs # github.com/moby/spdystream v0.5.0 ## explicit; go 1.13 github.com/moby/spdystream @@ -254,13 +154,10 @@ github.com/moby/spdystream/spdy # github.com/moby/sys/mountinfo v0.7.2 ## explicit; go 1.17 github.com/moby/sys/mountinfo -# github.com/moby/sys/userns v0.1.0 -## explicit; go 1.21 -github.com/moby/sys/userns # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent -# github.com/modern-go/reflect2 v1.0.2 +# github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee ## explicit; go 1.12 github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 @@ -269,7 +166,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.25.3 +# github.com/onsi/ginkgo/v2 v2.26.0 ## explicit; go 1.23.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -288,6 +185,7 @@ github.com/onsi/ginkgo/v2/internal github.com/onsi/ginkgo/v2/internal/global github.com/onsi/ginkgo/v2/internal/interrupt_handler github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/internal/reporters github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types @@ -305,26 +203,9 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/opencontainers/cgroups v0.0.1 -## explicit; go 1.23.0 -github.com/opencontainers/cgroups -github.com/opencontainers/cgroups/devices/config -github.com/opencontainers/cgroups/fs -github.com/opencontainers/cgroups/fs2 -github.com/opencontainers/cgroups/fscommon -github.com/opencontainers/cgroups/internal/path -github.com/opencontainers/cgroups/manager -github.com/opencontainers/cgroups/systemd # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.1 -## explicit; go 1.18 -github.com/opencontainers/image-spec/specs-go -github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runtime-spec v1.2.0 -## explicit -github.com/opencontainers/runtime-spec/specs-go # github.com/opencontainers/selinux v1.11.1 ## explicit; go 1.19 github.com/opencontainers/selinux/go-selinux @@ -333,7 +214,10 @@ github.com/opencontainers/selinux/pkg/pwalkdir # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.23.0 +# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 +## explicit +github.com/pmezard/go-difflib/difflib +# 
github.com/prometheus/client_golang v1.23.2 ## explicit; go 1.23.0 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -348,7 +232,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.0 +# github.com/prometheus/common v0.66.1 ## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model @@ -357,13 +241,10 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/sirupsen/logrus v1.9.3 -## explicit; go 1.13 -github.com/sirupsen/logrus -# github.com/spf13/cobra v1.8.1 +# github.com/spf13/cobra v1.9.1 ## explicit; go 1.15 github.com/spf13/cobra -# github.com/spf13/pflag v1.0.5 +# github.com/spf13/pflag v1.0.6 ## explicit; go 1.12 github.com/spf13/pflag # github.com/stoewer/go-strcase v1.3.0 @@ -376,10 +257,6 @@ github.com/x448/float16 ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 -## explicit; go 1.22.7 -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 ## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp @@ -400,12 +277,12 @@ go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.34.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 -## explicit; go 1.22.7 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 -## explicit; go 1.22.7 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig @@ -430,8 +307,8 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.4.0 -## explicit; go 1.22.7 +# go.opentelemetry.io/proto/otlp v1.5.0 +## explicit; go 1.22.0 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 @@ -442,11 +319,14 @@ go.uber.org/automaxprocs go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs +# go.yaml.in/yaml/v2 v2.4.2 +## explicit; go 1.15 +go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.41.0 -## explicit; go 1.23.0 +# golang.org/x/crypto v0.42.0 +## explicit; go 1.24.0 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 @@ -458,9 
+338,11 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.43.0 +# golang.org/x/mod v0.27.0 ## explicit; go 1.23.0 -golang.org/x/net/context +golang.org/x/mod/semver +# golang.org/x/net v0.44.0 +## explicit; go 1.24.0 golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/html/charset @@ -478,21 +360,22 @@ golang.org/x/net/websocket ## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.16.0 -## explicit; go 1.23.0 +# golang.org/x/sync v0.17.0 +## explicit; go 1.24.0 +golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.35.0 -## explicit; go 1.23.0 +# golang.org/x/sys v0.36.0 +## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.34.0 -## explicit; go 1.23.0 +# golang.org/x/term v0.35.0 +## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.28.0 -## explicit; go 1.23.0 +# golang.org/x/text v0.29.0 +## explicit; go 1.24.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -521,14 +404,31 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.9.0 -## explicit; go 1.18 +# golang.org/x/time v0.12.0 +## explicit; go 1.23.0 golang.org/x/time/rate # golang.org/x/tools v0.36.0 ## explicit; go 1.23.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/edge golang.org/x/tools/go/ast/inspector +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/types/typeutil +golang.org/x/tools/internal/aliases +golang.org/x/tools/internal/event +golang.org/x/tools/internal/event/core +golang.org/x/tools/internal/event/keys +golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/gcimporter +golang.org/x/tools/internal/gocommand +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/stdlib +golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/versions # google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api/expr/v1alpha1 @@ -537,7 +437,7 @@ google.golang.org/genproto/googleapis/api/httpbody ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.75.0 +# google.golang.org/grpc v1.75.1 ## explicit; go 1.23.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -601,7 +501,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.8 +# google.golang.org/protobuf v1.36.9 ## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson @@ -655,7 +555,7 @@ gopkg.in/inf.v0 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.33.4 => k8s.io/api v0.33.2 +# k8s.io/api v0.34.1 => k8s.io/api v0.33.2 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -695,7 +595,6 @@ k8s.io/api/flowcontrol/v1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/flowcontrol/v1beta3 
-k8s.io/api/imagepolicy/v1alpha1 k8s.io/api/networking/v1 k8s.io/api/networking/v1alpha1 k8s.io/api/networking/v1beta1 @@ -717,12 +616,12 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.33.4 => k8s.io/apiextensions-apiserver v0.33.2 +# k8s.io/apiextensions-apiserver v0.34.0 => k8s.io/apiextensions-apiserver v0.33.2 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.33.4 +# k8s.io/apimachinery v0.34.1 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -793,7 +692,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.33.4 +# k8s.io/apiserver v0.34.1 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -833,18 +732,12 @@ k8s.io/apiserver/pkg/cel/library k8s.io/apiserver/pkg/cel/mutation k8s.io/apiserver/pkg/cel/mutation/dynamic k8s.io/apiserver/pkg/cel/openapi/resolver -k8s.io/apiserver/pkg/endpoints/metrics k8s.io/apiserver/pkg/endpoints/openapi k8s.io/apiserver/pkg/endpoints/request -k8s.io/apiserver/pkg/endpoints/responsewriter k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/quota/v1 k8s.io/apiserver/pkg/server/egressselector k8s.io/apiserver/pkg/server/egressselector/metrics -k8s.io/apiserver/pkg/server/healthz -k8s.io/apiserver/pkg/server/httplog -k8s.io/apiserver/pkg/server/routine -k8s.io/apiserver/pkg/storage k8s.io/apiserver/pkg/storage/names k8s.io/apiserver/pkg/util/compatibility k8s.io/apiserver/pkg/util/feature @@ -853,7 +746,6 @@ k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning # k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.33.2 ## explicit; go 1.24.0 -k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -883,7 +775,6 @@ k8s.io/client-go/applyconfigurations/flowcontrol/v1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 -k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 k8s.io/client-go/applyconfigurations/networking/v1 @@ -909,10 +800,7 @@ k8s.io/client-go/applyconfigurations/storage/v1beta1 k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1 k8s.io/client-go/discovery k8s.io/client-go/discovery/cached/memory -k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic -k8s.io/client-go/dynamic/dynamicinformer -k8s.io/client-go/dynamic/dynamiclister k8s.io/client-go/features k8s.io/client-go/gentype k8s.io/client-go/informers @@ -988,118 +876,62 @@ k8s.io/client-go/informers/storage/v1beta1 k8s.io/client-go/informers/storagemigration k8s.io/client-go/informers/storagemigration/v1alpha1 k8s.io/client-go/kubernetes -k8s.io/client-go/kubernetes/fake k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1 -k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake 
k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 -k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 -k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1 -k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake k8s.io/client-go/kubernetes/typed/apps/v1 -k8s.io/client-go/kubernetes/typed/apps/v1/fake k8s.io/client-go/kubernetes/typed/apps/v1beta1 -k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake k8s.io/client-go/kubernetes/typed/apps/v1beta2 -k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake k8s.io/client-go/kubernetes/typed/authentication/v1 -k8s.io/client-go/kubernetes/typed/authentication/v1/fake k8s.io/client-go/kubernetes/typed/authentication/v1alpha1 -k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake k8s.io/client-go/kubernetes/typed/authentication/v1beta1 -k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake k8s.io/client-go/kubernetes/typed/authorization/v1 -k8s.io/client-go/kubernetes/typed/authorization/v1/fake k8s.io/client-go/kubernetes/typed/authorization/v1beta1 -k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v1 -k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2 -k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 -k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 -k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake k8s.io/client-go/kubernetes/typed/batch/v1 -k8s.io/client-go/kubernetes/typed/batch/v1/fake k8s.io/client-go/kubernetes/typed/batch/v1beta1 -k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake k8s.io/client-go/kubernetes/typed/certificates/v1 -k8s.io/client-go/kubernetes/typed/certificates/v1/fake k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 -k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake k8s.io/client-go/kubernetes/typed/certificates/v1beta1 -k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake k8s.io/client-go/kubernetes/typed/coordination/v1 -k8s.io/client-go/kubernetes/typed/coordination/v1/fake k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 -k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake k8s.io/client-go/kubernetes/typed/coordination/v1beta1 -k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1 -k8s.io/client-go/kubernetes/typed/core/v1/fake k8s.io/client-go/kubernetes/typed/discovery/v1 -k8s.io/client-go/kubernetes/typed/discovery/v1/fake k8s.io/client-go/kubernetes/typed/discovery/v1beta1 -k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake k8s.io/client-go/kubernetes/typed/events/v1 -k8s.io/client-go/kubernetes/typed/events/v1/fake k8s.io/client-go/kubernetes/typed/events/v1beta1 -k8s.io/client-go/kubernetes/typed/events/v1beta1/fake k8s.io/client-go/kubernetes/typed/extensions/v1beta1 -k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake 
k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake k8s.io/client-go/kubernetes/typed/networking/v1 -k8s.io/client-go/kubernetes/typed/networking/v1/fake k8s.io/client-go/kubernetes/typed/networking/v1alpha1 -k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake k8s.io/client-go/kubernetes/typed/networking/v1beta1 -k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake k8s.io/client-go/kubernetes/typed/node/v1 -k8s.io/client-go/kubernetes/typed/node/v1/fake k8s.io/client-go/kubernetes/typed/node/v1alpha1 -k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake k8s.io/client-go/kubernetes/typed/node/v1beta1 -k8s.io/client-go/kubernetes/typed/node/v1beta1/fake k8s.io/client-go/kubernetes/typed/policy/v1 -k8s.io/client-go/kubernetes/typed/policy/v1/fake k8s.io/client-go/kubernetes/typed/policy/v1beta1 -k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake k8s.io/client-go/kubernetes/typed/rbac/v1 -k8s.io/client-go/kubernetes/typed/rbac/v1/fake k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 -k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake k8s.io/client-go/kubernetes/typed/rbac/v1beta1 -k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake k8s.io/client-go/kubernetes/typed/resource/v1alpha3 -k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake k8s.io/client-go/kubernetes/typed/resource/v1beta1 -k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake k8s.io/client-go/kubernetes/typed/resource/v1beta2 -k8s.io/client-go/kubernetes/typed/resource/v1beta2/fake k8s.io/client-go/kubernetes/typed/scheduling/v1 -k8s.io/client-go/kubernetes/typed/scheduling/v1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 -k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 -k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake k8s.io/client-go/kubernetes/typed/storage/v1 -k8s.io/client-go/kubernetes/typed/storage/v1/fake k8s.io/client-go/kubernetes/typed/storage/v1alpha1 -k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1beta1 -k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1 -k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake k8s.io/client-go/listers k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1alpha1 @@ -1160,7 +992,6 @@ k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest -k8s.io/client-go/rest/fake k8s.io/client-go/rest/watch k8s.io/client-go/restmapper k8s.io/client-go/scale @@ -1179,7 +1010,6 @@ k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 -k8s.io/client-go/tools/events k8s.io/client-go/tools/internal/events k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager @@ -1203,68 +1033,39 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.33.4 +# k8s.io/cloud-provider v0.34.1 ## explicit; go 1.24.0 k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.33.4 +# k8s.io/component-base v0.34.1 ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility -k8s.io/component-base/config 
-k8s.io/component-base/config/v1alpha1 -k8s.io/component-base/config/validation k8s.io/component-base/featuregate k8s.io/component-base/logs k8s.io/component-base/logs/api/v1 k8s.io/component-base/logs/internal/setverbositylevel k8s.io/component-base/logs/klogflags -k8s.io/component-base/logs/logreduction k8s.io/component-base/logs/testinit k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry +k8s.io/component-base/metrics/prometheus/compatversion k8s.io/component-base/metrics/prometheus/feature -k8s.io/component-base/metrics/prometheus/slis k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/zpages/features -# k8s.io/component-helpers v0.33.4 +# k8s.io/component-helpers v0.34.1 ## explicit; go 1.24.0 -k8s.io/component-helpers/node/topology k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/resource k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity -k8s.io/component-helpers/storage/ephemeral -k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.33.4 +# k8s.io/controller-manager v0.34.1 ## explicit; go 1.24.0 k8s.io/controller-manager/pkg/features -# k8s.io/cri-api v0.33.2 -## explicit; go 1.24.0 -k8s.io/cri-api/pkg/apis -k8s.io/cri-api/pkg/apis/runtime/v1 -# k8s.io/cri-client v0.0.0 => k8s.io/cri-client v0.33.2 -## explicit; go 1.24.0 -k8s.io/cri-client/pkg -k8s.io/cri-client/pkg/internal -k8s.io/cri-client/pkg/logs -k8s.io/cri-client/pkg/util -# k8s.io/csi-translation-lib v0.33.4 => k8s.io/csi-translation-lib v0.33.2 -## explicit; go 1.24.0 -k8s.io/csi-translation-lib -k8s.io/csi-translation-lib/plugins -# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.33.2 -## explicit; go 1.24.0 -k8s.io/dynamic-resource-allocation/api -k8s.io/dynamic-resource-allocation/cel -k8s.io/dynamic-resource-allocation/internal/queue -k8s.io/dynamic-resource-allocation/resourceclaim -k8s.io/dynamic-resource-allocation/resourceslice/tracker -k8s.io/dynamic-resource-allocation/structured # k8s.io/klog/v2 v2.130.1 ## explicit; go 1.18 k8s.io/klog/v2 @@ -1276,8 +1077,8 @@ k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler k8s.io/klog/v2/internal/verbosity k8s.io/klog/v2/textlogger -# k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff -## explicit; go 1.21 +# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b +## explicit; go 1.23 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 @@ -1292,10 +1093,6 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.33.2 -## explicit; go 1.24.0 -k8s.io/kube-scheduler/config/v1 -k8s.io/kube-scheduler/extender/v1 # k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.33.2 ## explicit; go 1.24.0 k8s.io/kubectl/pkg/scale @@ -1303,14 +1100,8 @@ k8s.io/kubectl/pkg/util/podutils # k8s.io/kubelet v0.33.2 => k8s.io/kubelet v0.33.2 ## explicit; go 1.24.0 k8s.io/kubelet/pkg/apis -k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 -k8s.io/kubelet/pkg/apis/dra/v1alpha4 -k8s.io/kubelet/pkg/apis/dra/v1beta1 -k8s.io/kubelet/pkg/apis/pluginregistration/v1 -k8s.io/kubelet/pkg/apis/podresources/v1 -k8s.io/kubelet/pkg/apis/podresources/v1alpha1 
k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.33.4 +# k8s.io/kubernetes v1.34.1 ## explicit; go 1.24.0 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1319,6 +1110,7 @@ k8s.io/kubernetes/pkg/api/v1/service k8s.io/kubernetes/pkg/apis/apps k8s.io/kubernetes/pkg/apis/autoscaling k8s.io/kubernetes/pkg/apis/batch +k8s.io/kubernetes/pkg/apis/certificates k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/apis/core/helper k8s.io/kubernetes/pkg/apis/core/helper/qos @@ -1326,113 +1118,21 @@ k8s.io/kubernetes/pkg/apis/core/install k8s.io/kubernetes/pkg/apis/core/pods k8s.io/kubernetes/pkg/apis/core/v1 k8s.io/kubernetes/pkg/apis/core/v1/helper -k8s.io/kubernetes/pkg/apis/core/v1/helper/qos k8s.io/kubernetes/pkg/apis/core/validation k8s.io/kubernetes/pkg/apis/extensions k8s.io/kubernetes/pkg/apis/networking -k8s.io/kubernetes/pkg/apis/scheduling k8s.io/kubernetes/pkg/capabilities k8s.io/kubernetes/pkg/client/conditions k8s.io/kubernetes/pkg/controller k8s.io/kubernetes/pkg/controller/deployment/util -k8s.io/kubernetes/pkg/credentialprovider k8s.io/kubernetes/pkg/features k8s.io/kubernetes/pkg/fieldpath -k8s.io/kubernetes/pkg/kubelet/apis/config -k8s.io/kubernetes/pkg/kubelet/apis/podresources -k8s.io/kubernetes/pkg/kubelet/cadvisor -k8s.io/kubernetes/pkg/kubelet/checkpointmanager -k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum -k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors -k8s.io/kubernetes/pkg/kubelet/cm -k8s.io/kubernetes/pkg/kubelet/cm/admission -k8s.io/kubernetes/pkg/kubelet/cm/containermap -k8s.io/kubernetes/pkg/kubelet/cm/cpumanager -k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state -k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology -k8s.io/kubernetes/pkg/kubelet/cm/devicemanager -k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint -k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1 -k8s.io/kubernetes/pkg/kubelet/cm/dra -k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin -k8s.io/kubernetes/pkg/kubelet/cm/dra/state -k8s.io/kubernetes/pkg/kubelet/cm/memorymanager -k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state -k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates -k8s.io/kubernetes/pkg/kubelet/cm/topologymanager -k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask -k8s.io/kubernetes/pkg/kubelet/cm/util -k8s.io/kubernetes/pkg/kubelet/config -k8s.io/kubernetes/pkg/kubelet/container k8s.io/kubernetes/pkg/kubelet/events -k8s.io/kubernetes/pkg/kubelet/eviction/api -k8s.io/kubernetes/pkg/kubelet/kuberuntime/util -k8s.io/kubernetes/pkg/kubelet/lifecycle -k8s.io/kubernetes/pkg/kubelet/metrics -k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache -k8s.io/kubernetes/pkg/kubelet/qos k8s.io/kubernetes/pkg/kubelet/server/metrics -k8s.io/kubernetes/pkg/kubelet/stats/pidlimit -k8s.io/kubernetes/pkg/kubelet/status -k8s.io/kubernetes/pkg/kubelet/types -k8s.io/kubernetes/pkg/kubelet/util -k8s.io/kubernetes/pkg/kubelet/util/format -k8s.io/kubernetes/pkg/kubelet/util/store -k8s.io/kubernetes/pkg/kubelet/util/swap -k8s.io/kubernetes/pkg/kubelet/winstats -k8s.io/kubernetes/pkg/probe -k8s.io/kubernetes/pkg/probe/http -k8s.io/kubernetes/pkg/scheduler -k8s.io/kubernetes/pkg/scheduler/apis/config -k8s.io/kubernetes/pkg/scheduler/apis/config/scheme -k8s.io/kubernetes/pkg/scheduler/apis/config/v1 -k8s.io/kubernetes/pkg/scheduler/apis/config/validation -k8s.io/kubernetes/pkg/scheduler/backend/cache -k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger -k8s.io/kubernetes/pkg/scheduler/backend/heap 
-k8s.io/kubernetes/pkg/scheduler/backend/queue -k8s.io/kubernetes/pkg/scheduler/framework -k8s.io/kubernetes/pkg/scheduler/framework/parallelize -k8s.io/kubernetes/pkg/scheduler/framework/plugins -k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder -k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption -k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources -k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature -k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper -k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality -k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity -k8s.io/kubernetes/pkg/scheduler/framework/plugins/names -k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity -k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename -k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports -k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources -k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable -k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits -k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread -k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort -k8s.io/kubernetes/pkg/scheduler/framework/plugins/schedulinggates -k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration -k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding -k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics -k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions -k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone -k8s.io/kubernetes/pkg/scheduler/framework/preemption -k8s.io/kubernetes/pkg/scheduler/framework/runtime -k8s.io/kubernetes/pkg/scheduler/metrics -k8s.io/kubernetes/pkg/scheduler/profile -k8s.io/kubernetes/pkg/scheduler/util -k8s.io/kubernetes/pkg/scheduler/util/assumecache -k8s.io/kubernetes/pkg/scheduler/util/queue -k8s.io/kubernetes/pkg/security/apparmor k8s.io/kubernetes/pkg/securitycontext -k8s.io/kubernetes/pkg/util/filesystem k8s.io/kubernetes/pkg/util/hash -k8s.io/kubernetes/pkg/util/kernel k8s.io/kubernetes/pkg/util/labels -k8s.io/kubernetes/pkg/util/oom k8s.io/kubernetes/pkg/util/parsers -k8s.io/kubernetes/pkg/util/pod -k8s.io/kubernetes/pkg/util/slice k8s.io/kubernetes/pkg/util/taints k8s.io/kubernetes/pkg/volume k8s.io/kubernetes/pkg/volume/util @@ -1466,23 +1166,18 @@ k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/format k8s.io/kubernetes/test/utils/image k8s.io/kubernetes/test/utils/kubeconfig -k8s.io/kubernetes/third_party/forked/golang/expansion -k8s.io/kubernetes/third_party/forked/libcontainer/apparmor -k8s.io/kubernetes/third_party/forked/libcontainer/utils -# k8s.io/mount-utils v0.33.4 +# k8s.io/mount-utils v0.34.1 ## explicit; go 1.24.0 k8s.io/mount-utils # k8s.io/pod-security-admission v0.33.4 ## explicit; go 1.24.0 k8s.io/pod-security-admission/api k8s.io/pod-security-admission/policy -# k8s.io/utils v0.0.0-20241210054802-24370beab758 +# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock -k8s.io/utils/cpuset k8s.io/utils/exec -k8s.io/utils/inotify k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/golang-lru k8s.io/utils/internal/third_party/forked/golang/net @@ -1512,12 +1207,18 @@ sigs.k8s.io/randfill/bytesource # sigs.k8s.io/structured-merge-diff/v4 v4.6.0 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath -sigs.k8s.io/structured-merge-diff/v4/merge 
sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value -# sigs.k8s.io/yaml v1.4.0 -## explicit; go 1.12 +# sigs.k8s.io/structured-merge-diff/v6 v6.3.0 +## explicit; go 1.23 +sigs.k8s.io/structured-merge-diff/v6/fieldpath +sigs.k8s.io/structured-merge-diff/v6/merge +sigs.k8s.io/structured-merge-diff/v6/schema +sigs.k8s.io/structured-merge-diff/v6/typed +sigs.k8s.io/structured-merge-diff/v6/value +# sigs.k8s.io/yaml v1.6.0 +## explicit; go 1.22 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 # github.com/ceph/ceph-csi => ../ diff --git a/e2e/vendor/github.com/opencontainers/cgroups/LICENSE b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/LICENSE similarity index 100% rename from e2e/vendor/github.com/opencontainers/cgroups/LICENSE rename to e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/LICENSE diff --git a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/doc.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/doc.go similarity index 65% rename from e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/doc.go rename to e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/doc.go index ac48d030776..f4fbbff262e 100644 --- a/e2e/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/doc.go +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +groupName=kubescheduler.config.k8s.io - -package config +// Package fieldpath defines a way for referencing path elements (e.g., an +// index in an array, or a key in a map). It provides types for arranging these +// into paths for referencing nested fields, and for grouping those into sets, +// for referencing multiple nested fields. +package fieldpath diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/element.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/element.go new file mode 100644 index 00000000000..73436912cdd --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/element.go @@ -0,0 +1,388 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "iter" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +// PathElement describes how to select a child field given a containing object. +type PathElement struct { + // Exactly one of the following fields should be non-nil. + + // FieldName selects a single field from a map (reminder: this is also + // how structs are represented). The containing object must be a map. + FieldName *string + + // Key selects the list element which has fields matching those given. + // The containing object must be an associative list with map typed + // elements. They are sorted alphabetically. + Key *value.FieldList + + // Value selects the list element with the given value. 
The containing + // object must be an associative list with a primitive typed element + // (i.e., a set). + Value *value.Value + + // Index selects a list element by its index number. The containing + // object must be an atomic list. + Index *int +} + +// FieldNameElement creates a new FieldName PathElement. +func FieldNameElement(name string) PathElement { + return PathElement{FieldName: &name} +} + +// KeyElement creates a new Key PathElement with the key fields. +func KeyElement(fields ...value.Field) PathElement { + l := value.FieldList(fields) + return PathElement{Key: &l} +} + +// KeyElementByFields creates a new Key PathElement from names and values. +// `nameValues` must have an even number of entries, alternating +// names (type must be string) with values (type must be value.Value). If these +// conditions are not met, KeyByFields will panic--it's intended for static +// construction and shouldn't have user-produced values passed to it. +func KeyElementByFields(nameValues ...any) PathElement { + return PathElement{Key: KeyByFields(nameValues...)} +} + +// ValueElement creates a new Value PathElement. +func ValueElement(value value.Value) PathElement { + return PathElement{Value: &value} +} + +// IndexElement creates a new Index PathElement. +func IndexElement(index int) PathElement { + return PathElement{Index: &index} +} + +// Less provides an order for path elements. +func (e PathElement) Less(rhs PathElement) bool { + return e.Compare(rhs) < 0 +} + +// Compare provides an order for path elements. +func (e PathElement) Compare(rhs PathElement) int { + if e.FieldName != nil { + if rhs.FieldName == nil { + return -1 + } + return strings.Compare(*e.FieldName, *rhs.FieldName) + } else if rhs.FieldName != nil { + return 1 + } + + if e.Key != nil { + if rhs.Key == nil { + return -1 + } + return e.Key.Compare(*rhs.Key) + } else if rhs.Key != nil { + return 1 + } + + if e.Value != nil { + if rhs.Value == nil { + return -1 + } + return value.Compare(*e.Value, *rhs.Value) + } else if rhs.Value != nil { + return 1 + } + + if e.Index != nil { + if rhs.Index == nil { + return -1 + } + if *e.Index < *rhs.Index { + return -1 + } else if *e.Index == *rhs.Index { + return 0 + } + return 1 + } else if rhs.Index != nil { + return 1 + } + + return 0 +} + +// Equals returns true if both path elements are equal. +func (e PathElement) Equals(rhs PathElement) bool { + if e.FieldName != nil { + if rhs.FieldName == nil { + return false + } + return *e.FieldName == *rhs.FieldName + } else if rhs.FieldName != nil { + return false + } + if e.Key != nil { + if rhs.Key == nil { + return false + } + return e.Key.Equals(*rhs.Key) + } else if rhs.Key != nil { + return false + } + if e.Value != nil { + if rhs.Value == nil { + return false + } + return value.Equals(*e.Value, *rhs.Value) + } else if rhs.Value != nil { + return false + } + if e.Index != nil { + if rhs.Index == nil { + return false + } + return *e.Index == *rhs.Index + } else if rhs.Index != nil { + return false + } + return true +} + +// String presents the path element as a human-readable string. +func (e PathElement) String() string { + switch { + case e.FieldName != nil: + return "." + *e.FieldName + case e.Key != nil: + strs := make([]string, len(*e.Key)) + for i, k := range *e.Key { + strs[i] = fmt.Sprintf("%v=%v", k.Name, value.ToString(k.Value)) + } + // Keys are supposed to be sorted. 
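+		// e.g. a two-field key might render as [name=a,port=80]
+		// (illustrative; the exact formatting of each value comes from value.ToString).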
+ return "[" + strings.Join(strs, ",") + "]" + case e.Value != nil: + return fmt.Sprintf("[=%v]", value.ToString(*e.Value)) + case e.Index != nil: + return fmt.Sprintf("[%v]", *e.Index) + default: + return "{{invalid path element}}" + } +} + +// Copy returns a copy of the PathElement. +// This is not a full deep copy as any contained value.Value is not copied. +func (e PathElement) Copy() PathElement { + if e.FieldName != nil { + return PathElement{FieldName: e.FieldName} + } + if e.Key != nil { + c := e.Key.Copy() + return PathElement{Key: &c} + } + if e.Value != nil { + return PathElement{Value: e.Value} + } + if e.Index != nil { + return PathElement{Index: e.Index} + } + return e // zero value +} + +// KeyByFields is a helper function which constructs a key for an associative +// list type. `nameValues` must have an even number of entries, alternating +// names (type must be string) with values (type must be value.Value). If these +// conditions are not met, KeyByFields will panic--it's intended for static +// construction and shouldn't have user-produced values passed to it. +func KeyByFields(nameValues ...interface{}) *value.FieldList { + if len(nameValues)%2 != 0 { + panic("must have a value for every name") + } + out := value.FieldList{} + for i := 0; i < len(nameValues)-1; i += 2 { + out = append(out, value.Field{Name: nameValues[i].(string), Value: value.NewValueInterface(nameValues[i+1])}) + } + out.Sort() + return &out +} + +// PathElementSet is a set of path elements. +// TODO: serialize as a list. +type PathElementSet struct { + members sortedPathElements +} + +func MakePathElementSet(size int) PathElementSet { + return PathElementSet{ + members: make(sortedPathElements, 0, size), + } +} + +type sortedPathElements []PathElement + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. +func (spe sortedPathElements) Len() int { return len(spe) } +func (spe sortedPathElements) Less(i, j int) bool { return spe[i].Less(spe[j]) } +func (spe sortedPathElements) Swap(i, j int) { spe[i], spe[j] = spe[j], spe[i] } + +// Copy returns a copy of the PathElementSet. +// This is not a full deep copy as any contained value.Value is not copied. +func (s PathElementSet) Copy() PathElementSet { + out := make(sortedPathElements, len(s.members)) + for i := range s.members { + out[i] = s.members[i].Copy() + } + return PathElementSet{members: out} +} + +// Insert adds pe to the set. +func (s *PathElementSet) Insert(pe PathElement) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, pe) + return + } + if s.members[loc].Equals(pe) { + return + } + s.members = append(s.members, PathElement{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = pe +} + +// Union returns a set containing elements that appear in either s or s2. +func (s *PathElementSet) Union(s2 *PathElementSet) *PathElementSet { + out := &PathElementSet{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].Less(s2.members[j]) { + out.members = append(out.members, s.members[i]) + i++ + } else { + out.members = append(out.members, s2.members[j]) + if !s2.members[j].Less(s.members[i]) { + i++ + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + if j < len(s2.members) { + out.members = append(out.members, s2.members[j:]...) 
+	}
+	return out
+}
+
+// Intersection returns a set containing elements which appear in both s and s2.
+func (s *PathElementSet) Intersection(s2 *PathElementSet) *PathElementSet {
+	out := &PathElementSet{}
+
+	i, j := 0, 0
+	for i < len(s.members) && j < len(s2.members) {
+		if s.members[i].Less(s2.members[j]) {
+			i++
+		} else {
+			if !s2.members[j].Less(s.members[i]) {
+				out.members = append(out.members, s.members[i])
+				i++
+			}
+			j++
+		}
+	}
+
+	return out
+}
+
+// Difference returns a set containing elements which appear in s but not in s2.
+func (s *PathElementSet) Difference(s2 *PathElementSet) *PathElementSet {
+	out := &PathElementSet{}
+
+	i, j := 0, 0
+	for i < len(s.members) && j < len(s2.members) {
+		if s.members[i].Less(s2.members[j]) {
+			out.members = append(out.members, s.members[i])
+			i++
+		} else {
+			if !s2.members[j].Less(s.members[i]) {
+				i++
+			}
+			j++
+		}
+	}
+	if i < len(s.members) {
+		out.members = append(out.members, s.members[i:]...)
+	}
+	return out
+}
+
+// Size returns the number of elements in the set.
+func (s *PathElementSet) Size() int { return len(s.members) }
+
+// Has returns true if pe is a member of the set.
+func (s *PathElementSet) Has(pe PathElement) bool {
+	loc := sort.Search(len(s.members), func(i int) bool {
+		return !s.members[i].Less(pe)
+	})
+	if loc == len(s.members) {
+		return false
+	}
+	if s.members[loc].Equals(pe) {
+		return true
+	}
+	return false
+}
+
+// Equals returns true if s and s2 have exactly the same members.
+func (s *PathElementSet) Equals(s2 *PathElementSet) bool {
+	if len(s.members) != len(s2.members) {
+		return false
+	}
+	for k := range s.members {
+		if !s.members[k].Equals(s2.members[k]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Iterate calls f for each PathElement in the set. The order is deterministic.
+func (s *PathElementSet) Iterate(f func(PathElement)) {
+	for _, pe := range s.members {
+		f(pe)
+	}
+}
+
+// All iterates over each PathElement in the set. The order is deterministic.
+func (s *PathElementSet) All() iter.Seq[PathElement] {
+	return func(yield func(element PathElement) bool) {
+		for _, pe := range s.members {
+			if !yield(pe) {
+				return
+			}
+		}
+	}
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/fromvalue.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/fromvalue.go
new file mode 100644
index 00000000000..78383e4015e
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/fromvalue.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+	"sigs.k8s.io/structured-merge-diff/v6/value"
+)
+
+// SetFromValue creates a set containing every leaf field mentioned in v.
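+//
+// A minimal illustrative sketch (hypothetical input, not part of this patch):
+//
+//	v := value.NewValueInterface(map[string]interface{}{
+//		"spec": map[string]interface{}{"replicas": 3},
+//	})
+//	s := SetFromValue(v) // s contains the leaf path .spec.replicas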
+func SetFromValue(v value.Value) *Set { + s := NewSet() + + w := objectWalker{ + path: Path{}, + value: v, + allocator: value.NewFreelistAllocator(), + do: func(p Path) { s.Insert(p) }, + } + + w.walk() + return s +} + +type objectWalker struct { + path Path + value value.Value + allocator value.Allocator + + do func(Path) +} + +func (w *objectWalker) walk() { + switch { + case w.value.IsNull(): + case w.value.IsFloat(): + case w.value.IsInt(): + case w.value.IsString(): + case w.value.IsBool(): + // All leaf fields handled the same way (after the switch + // statement). + + // Descend + case w.value.IsList(): + // If the list were atomic, we'd break here, but we don't have + // a schema, so we can't tell. + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + i, value := iter.Item() + w2 := *w + w2.path = append(w.path, w.GuessBestListPathElement(i, value)) + w2.value = value + w2.walk() + } + return + case w.value.IsMap(): + // If the map/struct were atomic, we'd break here, but we don't + // have a schema, so we can't tell. + + m := w.value.AsMapUsing(w.allocator) + defer w.allocator.Free(m) + m.IterateUsing(w.allocator, func(k string, val value.Value) bool { + w2 := *w + w2.path = append(w.path, PathElement{FieldName: &k}) + w2.value = val + w2.walk() + return true + }) + return + } + + // Leaf fields get added to the set. + if len(w.path) > 0 { + w.do(w.path) + } +} + +// AssociativeListCandidateFieldNames lists the field names which are +// considered keys if found in a list element. +var AssociativeListCandidateFieldNames = []string{ + "key", + "id", + "name", +} + +// GuessBestListPathElement guesses whether item is an associative list +// element, which should be referenced by key(s), or if it is not and therefore +// referencing by index is acceptable. Currently this is done by checking +// whether item has any of the fields listed in +// AssociativeListCandidateFieldNames which have scalar values. +func (w *objectWalker) GuessBestListPathElement(index int, item value.Value) PathElement { + if !item.IsMap() { + // Non map items could be parts of sets or regular "atomic" + // lists. We won't try to guess whether something should be a + // set or not. + return PathElement{Index: &index} + } + + m := item.AsMapUsing(w.allocator) + defer w.allocator.Free(m) + var keys value.FieldList + for _, name := range AssociativeListCandidateFieldNames { + f, ok := m.Get(name) + if !ok { + continue + } + // only accept primitive/scalar types as keys. + if f.IsNull() || f.IsMap() || f.IsList() { + continue + } + keys = append(keys, value.Field{Name: name, Value: f}) + } + if len(keys) > 0 { + keys.Sort() + return PathElement{Key: &keys} + } + return PathElement{Index: &index} +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/managers.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/managers.go new file mode 100644 index 00000000000..20499dc03da --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/managers.go @@ -0,0 +1,144 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+	"fmt"
+	"strings"
+)
+
+// APIVersion describes the version of an object or of a fieldset.
+type APIVersion string
+
+// VersionedSet associates a version to a set.
+type VersionedSet interface {
+	Set() *Set
+	APIVersion() APIVersion
+	Applied() bool
+}
+
+// versionedSet is the default VersionedSet implementation.
+type versionedSet struct {
+	set        *Set
+	apiVersion APIVersion
+	applied    bool
+}
+
+func NewVersionedSet(set *Set, apiVersion APIVersion, applied bool) VersionedSet {
+	return versionedSet{
+		set:        set,
+		apiVersion: apiVersion,
+		applied:    applied,
+	}
+}
+
+func (v versionedSet) Set() *Set {
+	return v.set
+}
+
+func (v versionedSet) APIVersion() APIVersion {
+	return v.apiVersion
+}
+
+func (v versionedSet) Applied() bool {
+	return v.applied
+}
+
+// ManagedFields is a map from manager to VersionedSet (what they own in
+// what version).
+type ManagedFields map[string]VersionedSet
+
+// Equals returns true if the two ManagedFields are the same, false
+// otherwise.
+func (lhs ManagedFields) Equals(rhs ManagedFields) bool {
+	if len(lhs) != len(rhs) {
+		return false
+	}
+
+	for manager, left := range lhs {
+		right, ok := rhs[manager]
+		if !ok {
+			return false
+		}
+		if left.APIVersion() != right.APIVersion() || left.Applied() != right.Applied() {
+			return false
+		}
+		if !left.Set().Equals(right.Set()) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy the list; this is mostly a shallow copy.
+func (lhs ManagedFields) Copy() ManagedFields {
+	copy := ManagedFields{}
+	for manager, set := range lhs {
+		copy[manager] = set
+	}
+	return copy
+}
+
+// Difference returns a symmetric difference between two Managers. If a
+// given user's entry has version X in lhs and version Y in rhs, then
+// the return value for that user will be from rhs. If the difference for
+// a user is an empty set, that user will not be inserted in the map.
+func (lhs ManagedFields) Difference(rhs ManagedFields) ManagedFields {
+	diff := ManagedFields{}
+
+	for manager, left := range lhs {
+		right, ok := rhs[manager]
+		if !ok {
+			if !left.Set().Empty() {
+				diff[manager] = left
+			}
+			continue
+		}
+
+		// If we have sets in both but their version
+		// differs, we don't even diff and keep the
+		// entire thing.
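+		// (The rhs entry is kept as-is, matching the doc comment above.)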
+		if left.APIVersion() != right.APIVersion() {
+			diff[manager] = right
+			continue
+		}
+
+		newSet := left.Set().Difference(right.Set()).Union(right.Set().Difference(left.Set()))
+		if !newSet.Empty() {
+			diff[manager] = NewVersionedSet(newSet, right.APIVersion(), false)
+		}
+	}
+
+	for manager, set := range rhs {
+		if _, ok := lhs[manager]; ok {
+			// Already done
+			continue
+		}
+		if !set.Set().Empty() {
+			diff[manager] = set
+		}
+	}
+
+	return diff
+}
+
+func (lhs ManagedFields) String() string {
+	s := strings.Builder{}
+	for k, v := range lhs {
+		fmt.Fprintf(&s, "%s:\n", k)
+		fmt.Fprintf(&s, "- Applied: %v\n", v.Applied())
+		fmt.Fprintf(&s, "- APIVersion: %v\n", v.APIVersion())
+		fmt.Fprintf(&s, "- Set: %v\n", v.Set())
+	}
+	return s.String()
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/path.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/path.go
new file mode 100644
index 00000000000..a865ec4252e
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/path.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/structured-merge-diff/v6/value"
+)
+
+// Path describes how to select a potentially deeply-nested child field given a
+// containing object.
+type Path []PathElement
+
+func (fp Path) String() string {
+	strs := make([]string, len(fp))
+	for i := range fp {
+		strs[i] = fp[i].String()
+	}
+	return strings.Join(strs, "")
+}
+
+// Equals returns true if the two paths are equivalent.
+func (fp Path) Equals(fp2 Path) bool {
+	if len(fp) != len(fp2) {
+		return false
+	}
+	for i := range fp {
+		if !fp[i].Equals(fp2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Compare provides a lexical order for Paths.
+func (fp Path) Compare(rhs Path) int {
+	i := 0
+	for {
+		if i >= len(fp) && i >= len(rhs) {
+			// Paths are the same length and all items are equal.
+			return 0
+		}
+		if i >= len(fp) {
+			// LHS is shorter.
+			return -1
+		}
+		if i >= len(rhs) {
+			// RHS is shorter.
+			return 1
+		}
+		if c := fp[i].Compare(rhs[i]); c != 0 {
+			return c
+		}
+		// The items are equal; continue.
+		i++
+	}
+}
+
+func (fp Path) Copy() Path {
+	new := make(Path, len(fp))
+	copy(new, fp)
+	return new
+}
+
+// MakePath constructs a Path. The parts may be PathElements, ints, strings.
+func MakePath(parts ...interface{}) (Path, error) {
+	var fp Path
+	for _, p := range parts {
+		switch t := p.(type) {
+		case PathElement:
+			fp = append(fp, t)
+		case int:
+			// TODO: Understand schema and object and convert this to the
+			// FieldSpecifier below if appropriate.
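+			// For now a plain int is always treated as an atomic-list index.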
+			fp = append(fp, PathElement{Index: &t})
+		case string:
+			fp = append(fp, PathElement{FieldName: &t})
+		case *value.FieldList:
+			if len(*t) == 0 {
+				return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)")
+			}
+			fp = append(fp, PathElement{Key: t})
+		case value.Value:
+			// TODO: understand schema and verify that this is a set type
+			// TODO: make a copy of t
+			fp = append(fp, PathElement{Value: &t})
+		default:
+			return nil, fmt.Errorf("unable to make %#v into a path element", p)
+		}
+	}
+	return fp, nil
+}
+
+// MakePathOrDie panics if parts can't be turned into a path. Good for things
+// that are known at compile time.
+func MakePathOrDie(parts ...interface{}) Path {
+	fp, err := MakePath(parts...)
+	if err != nil {
+		panic(err)
+	}
+	return fp
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/pathelementmap.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/pathelementmap.go
new file mode 100644
index 00000000000..ff7ee510c22
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/pathelementmap.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+	"sort"
+
+	"sigs.k8s.io/structured-merge-diff/v6/value"
+)
+
+// PathElementValueMap is a map from PathElement to value.Value.
+//
+// TODO(apelisse): We have multiple very similar implementations of this
+// for PathElementSet and SetNodeMap, so we could probably share the
+// code.
+type PathElementValueMap struct {
+	valueMap PathElementMap
+}
+
+func MakePathElementValueMap(size int) PathElementValueMap {
+	return PathElementValueMap{
+		valueMap: MakePathElementMap(size),
+	}
+}
+
+type sortedPathElementValues []pathElementValue
+
+// Implement the sort interface; this would permit bulk creation, which would
+// be faster than doing it one at a time via Insert.
+func (spev sortedPathElementValues) Len() int { return len(spev) }
+func (spev sortedPathElementValues) Less(i, j int) bool {
+	return spev[i].PathElement.Less(spev[j].PathElement)
+}
+func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] }
+
+// Insert adds the pathelement and associated value in the map.
+// If insert is called twice with the same PathElement, the value is replaced.
+func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) {
+	s.valueMap.Insert(pe, v)
+}
+
+// Get retrieves the value associated with the given PathElement from the map.
+// (nil, false) is returned if there is no such PathElement.
+func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) {
+	v, ok := s.valueMap.Get(pe)
+	if !ok {
+		return nil, false
+	}
+	return v.(value.Value), true
+}
+
+// PathElementMap is a map from PathElement to interface{}.
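+//
+// A small illustrative sketch (hypothetical usage, not part of this patch):
+//
+//	m := MakePathElementMap(1)
+//	m.Insert(FieldNameElement("spec"), 42)
+//	v, ok := m.Get(FieldNameElement("spec")) // v == 42, ok == true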
+type PathElementMap struct { + members sortedPathElementValues +} + +type pathElementValue struct { + PathElement PathElement + Value interface{} +} + +func MakePathElementMap(size int) PathElementMap { + return PathElementMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +// Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. +func (s *PathElementMap) Insert(pe PathElement, v interface{}) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, pathElementValue{pe, v}) + return + } + if s.members[loc].PathElement.Equals(pe) { + s.members[loc].Value = v + return + } + s.members = append(s.members, pathElementValue{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = pathElementValue{pe, v} +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. +func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].PathElement.Less(pe) + }) + if loc == len(s.members) { + return nil, false + } + if s.members[loc].PathElement.Equals(pe) { + return s.members[loc].Value, true + } + return nil, false +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/serialize-pe.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/serialize-pe.go new file mode 100644 index 00000000000..f4b00c2ee54 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/serialize-pe.go @@ -0,0 +1,186 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" + + jsoniter "github.com/json-iterator/go" + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +var ErrUnknownPathElementType = errors.New("unknown path element type") + +const ( + // Field indicates that the content of this path element is a field's name + peField = "f" + + // Value indicates that the content of this path element is a field's value + peValue = "v" + + // Index indicates that the content of this path element is an index in an array + peIndex = "i" + + // Key indicates that the content of this path element is a key value map + peKey = "k" + + // Separator separates the type of a path element from the contents + peSeparator = ":" +) + +var ( + peFieldSepBytes = []byte(peField + peSeparator) + peValueSepBytes = []byte(peValue + peSeparator) + peIndexSepBytes = []byte(peIndex + peSeparator) + peKeySepBytes = []byte(peKey + peSeparator) + peSepBytes = []byte(peSeparator) +) + +// readJSONIter reads a Value from a JSON iterator. 
+// DO NOT EXPORT
+// TODO: eliminate this https://github.com/kubernetes-sigs/structured-merge-diff/issues/202
+func readJSONIter(iter *jsoniter.Iterator) (value.Value, error) {
+	v := iter.Read()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return nil, iter.Error
+	}
+	return value.NewValueInterface(v), nil
+}
+
+// writeJSONStream writes a value into a JSON stream.
+// DO NOT EXPORT
+// TODO: eliminate this https://github.com/kubernetes-sigs/structured-merge-diff/issues/202
+func writeJSONStream(v value.Value, stream *jsoniter.Stream) {
+	stream.WriteVal(v.Unstructured())
+}
+
+// DeserializePathElement parses a serialized path element
+func DeserializePathElement(s string) (PathElement, error) {
+	b := []byte(s)
+	if len(b) < 2 {
+		return PathElement{}, errors.New("key must be at least 2 characters long")
+	}
+	typeSep, b := b[:2], b[2:]
+	if typeSep[1] != peSepBytes[0] {
+		return PathElement{}, fmt.Errorf("missing colon: %v", s)
+	}
+	switch typeSep[0] {
+	case peFieldSepBytes[0]:
+		// Slice s rather than convert b, to save on
+		// allocations.
+		str := s[2:]
+		return PathElement{
+			FieldName: &str,
+		}, nil
+	case peValueSepBytes[0]:
+		iter := readPool.BorrowIterator(b)
+		defer readPool.ReturnIterator(iter)
+		v, err := readJSONIter(iter)
+		if err != nil {
+			return PathElement{}, err
+		}
+		return PathElement{Value: &v}, nil
+	case peKeySepBytes[0]:
+		iter := readPool.BorrowIterator(b)
+		defer readPool.ReturnIterator(iter)
+		fields := value.FieldList{}
+
+		iter.ReadObjectCB(func(iter *jsoniter.Iterator, key string) bool {
+			v, err := readJSONIter(iter)
+			if err != nil {
+				iter.Error = err
+				return false
+			}
+			fields = append(fields, value.Field{Name: key, Value: v})
+			return true
+		})
+		fields.Sort()
+		return PathElement{Key: &fields}, iter.Error
+	case peIndexSepBytes[0]:
+		i, err := strconv.Atoi(s[2:])
+		if err != nil {
+			return PathElement{}, err
+		}
+		return PathElement{
+			Index: &i,
+		}, nil
+	default:
+		return PathElement{}, ErrUnknownPathElementType
+	}
+}
+
+var (
+	readPool  = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
+	writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
+)
+
+// SerializePathElement serializes a path element
+func SerializePathElement(pe PathElement) (string, error) {
+	buf := strings.Builder{}
+	err := serializePathElementToWriter(&buf, pe)
+	return buf.String(), err
+}
+
+func serializePathElementToWriter(w io.Writer, pe PathElement) error {
+	stream := writePool.BorrowStream(w)
+	defer writePool.ReturnStream(stream)
+	switch {
+	case pe.FieldName != nil:
+		if _, err := stream.Write(peFieldSepBytes); err != nil {
+			return err
+		}
+		stream.WriteRaw(*pe.FieldName)
+	case pe.Key != nil:
+		if _, err := stream.Write(peKeySepBytes); err != nil {
+			return err
+		}
+		stream.WriteObjectStart()
+
+		for i, field := range *pe.Key {
+			if i > 0 {
+				stream.WriteMore()
+			}
+			stream.WriteObjectField(field.Name)
+			writeJSONStream(field.Value, stream)
+		}
+		stream.WriteObjectEnd()
+	case pe.Value != nil:
+		if _, err := stream.Write(peValueSepBytes); err != nil {
+			return err
+		}
+		writeJSONStream(*pe.Value, stream)
+	case pe.Index != nil:
+		if _, err := stream.Write(peIndexSepBytes); err != nil {
+			return err
+		}
+		stream.WriteInt(*pe.Index)
+	default:
+		return errors.New("invalid PathElement")
+	}
+	b := stream.Buffer()
+	err := stream.Flush()
+	// Help jsoniter manage its buffers--without this, the next
+	// use of the stream is likely to require an allocation. Look
+	// at the jsoniter stream code to understand why. They were probably
+	// optimizing for folks using the buffer directly.
+	stream.SetBuffer(b[:0])
+	return err
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/serialize.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/serialize.go
new file mode 100644
index 00000000000..b992b93c5fa
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/serialize.go
@@ -0,0 +1,238 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+	"bytes"
+	"io"
+	"unsafe"
+
+	jsoniter "github.com/json-iterator/go"
+)
+
+func (s *Set) ToJSON() ([]byte, error) {
+	buf := bytes.Buffer{}
+	err := s.ToJSONStream(&buf)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (s *Set) ToJSONStream(w io.Writer) error {
+	stream := writePool.BorrowStream(w)
+	defer writePool.ReturnStream(stream)
+
+	var r reusableBuilder
+
+	stream.WriteObjectStart()
+	err := s.emitContentsV1(false, stream, &r)
+	if err != nil {
+		return err
+	}
+	stream.WriteObjectEnd()
+	return stream.Flush()
+}
+
+func manageMemory(stream *jsoniter.Stream) error {
+	// Help jsoniter manage its buffers--without this, it does a bunch of
+	// allocations that are not necessary. They were probably optimizing
+	// for folks using the buffer directly.
+ b := stream.Buffer() + if len(b) > 4096 || cap(b)-len(b) < 2048 { + if err := stream.Flush(); err != nil { + return err + } + stream.SetBuffer(b[:0]) + } + return nil +} + +type reusableBuilder struct { + bytes.Buffer +} + +func (r *reusableBuilder) unsafeString() string { + b := r.Bytes() + return *(*string)(unsafe.Pointer(&b)) +} + +func (r *reusableBuilder) reset() *bytes.Buffer { + r.Reset() + return &r.Buffer +} + +func (s *Set) emitContentsV1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error { + mi, ci := 0, 0 + first := true + preWrite := func() { + if first { + first = false + return + } + stream.WriteMore() + } + + if includeSelf && !(len(s.Members.members) == 0 && len(s.Children.members) == 0) { + preWrite() + stream.WriteObjectField(".") + stream.WriteEmptyObject() + } + + for mi < len(s.Members.members) && ci < len(s.Children.members) { + mpe := s.Members.members[mi] + cpe := s.Children.members[ci].pathElement + + if c := mpe.Compare(cpe); c < 0 { + preWrite() + if err := serializePathElementToWriter(r.reset(), mpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteEmptyObject() + mi++ + } else if c > 0 { + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + ci++ + } else { + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(true, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + mi++ + ci++ + } + } + + for mi < len(s.Members.members) { + mpe := s.Members.members[mi] + + preWrite() + if err := serializePathElementToWriter(r.reset(), mpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteEmptyObject() + mi++ + } + + for ci < len(s.Children.members) { + cpe := s.Children.members[ci].pathElement + + preWrite() + if err := serializePathElementToWriter(r.reset(), cpe); err != nil { + return err + } + stream.WriteObjectField(r.unsafeString()) + stream.WriteObjectStart() + if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil { + return err + } + stream.WriteObjectEnd() + ci++ + } + + return manageMemory(stream) +} + +// FromJSON clears s and reads a JSON formatted set structure. +func (s *Set) FromJSON(r io.Reader) error { + // The iterator pool is completely useless for memory management, grrr. + iter := jsoniter.Parse(jsoniter.ConfigCompatibleWithStandardLibrary, r, 4096) + + found, _ := readIterV1(iter) + if found == nil { + *s = Set{} + } else { + *s = *found + } + return iter.Error +} + +// returns true if this subtree is also (or only) a member of parent; s is nil +// if there are no further children. +func readIterV1(iter *jsoniter.Iterator) (children *Set, isMember bool) { + iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool { + if key == "." { + isMember = true + iter.Skip() + return true + } + pe, err := DeserializePathElement(key) + if err == ErrUnknownPathElementType { + // Ignore these-- a future version maybe knows what + // they are. We drop these completely rather than try + // to preserve things we don't understand. 
+ iter.Skip() + return true + } else if err != nil { + iter.ReportError("parsing key as path element", err.Error()) + iter.Skip() + return true + } + grandchildren, childIsMember := readIterV1(iter) + if childIsMember { + if children == nil { + children = &Set{} + } + m := &children.Members.members + // Since we expect that most of the time these will have been + // serialized in the right order, we just verify that and append. + appendOK := len(*m) == 0 || (*m)[len(*m)-1].Less(pe) + if appendOK { + *m = append(*m, pe) + } else { + children.Members.Insert(pe) + } + } + if grandchildren != nil { + if children == nil { + children = &Set{} + } + // Since we expect that most of the time these will have been + // serialized in the right order, we just verify that and append. + m := &children.Children.members + appendOK := len(*m) == 0 || (*m)[len(*m)-1].pathElement.Less(pe) + if appendOK { + *m = append(*m, setNode{pe, grandchildren}) + } else { + *children.Children.Descend(pe) = *grandchildren + } + } + return true + }) + if children == nil { + isMember = true + } + + return children, isMember +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/set.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/set.go new file mode 100644 index 00000000000..d2d8c8a42be --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/fieldpath/set.go @@ -0,0 +1,821 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "iter" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v6/value" + + "sigs.k8s.io/structured-merge-diff/v6/schema" +) + +// Set identifies a set of fields. +type Set struct { + // Members lists fields that are part of the set. + // TODO: will be serialized as a list of path elements. + Members PathElementSet + + // Children lists child fields which themselves have children that are + // members of the set. Appearance in this list does not imply membership. + // Note: this is a tree, not an arbitrary graph. + Children SetNodeMap +} + +// NewSet makes a set from a list of paths. +func NewSet(paths ...Path) *Set { + s := &Set{} + for _, p := range paths { + s.Insert(p) + } + return s +} + +// Copy returns a copy of the Set. +// This is not a full deep copy as any contained value.Value is not copied. +func (s *Set) Copy() *Set { + return &Set{ + Members: s.Members.Copy(), + Children: s.Children.Copy(), + } +} + +// Insert adds the field identified by `p` to the set. Important: parent fields +// are NOT added to the set; if that is desired, they must be added separately. +func (s *Set) Insert(p Path) { + if len(p) == 0 { + // Zero-length path identifies the entire object; we don't + // track top-level ownership. + return + } + for { + if len(p) == 1 { + s.Members.Insert(p[0]) + return + } + s = s.Children.Descend(p[0]) + p = p[1:] + } +} + +// Union returns a Set containing elements which appear in either s or s2. 
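+//
+// A minimal usage sketch (illustrative only):
+//
+//	lhs := NewSet(MakePathOrDie("spec", "replicas"))
+//	rhs := NewSet(MakePathOrDie("metadata", "name"))
+//	both := lhs.Union(rhs) // both holds .spec.replicas and .metadata.name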
+func (s *Set) Union(s2 *Set) *Set {
+	return &Set{
+		Members:  *s.Members.Union(&s2.Members),
+		Children: *s.Children.Union(&s2.Children),
+	}
+}
+
+// Intersection returns a Set containing leaf elements which appear in both s
+// and s2. Intersection can be constructed from Union and Difference operations
+// (example in the tests) but it's much faster to do it in one pass.
+func (s *Set) Intersection(s2 *Set) *Set {
+	return &Set{
+		Members:  *s.Members.Intersection(&s2.Members),
+		Children: *s.Children.Intersection(&s2.Children),
+	}
+}
+
+// Difference returns a Set containing elements which:
+// * appear in s
+// * do not appear in s2
+//
+// In other words, for leaf fields, this acts like a regular set difference
+// operation. When non leaf fields are compared with leaf fields ("parents"
+// which contain "children"), the effect is:
+// * parent - child = parent
+// * child - parent = {empty set}
+func (s *Set) Difference(s2 *Set) *Set {
+	return &Set{
+		Members:  *s.Members.Difference(&s2.Members),
+		Children: *s.Children.Difference(s2),
+	}
+}
+
+// RecursiveDifference returns a Set containing elements which:
+// * appear in s
+// * do not appear in s2
+//
+// Compared to a regular difference,
+// this removes every field **and its children** from s that is contained in s2.
+//
+// For example, with s containing `a.b.c` and s2 containing `a.b`,
+// a RecursiveDifference will result in `a`, as the entire node `a.b` gets removed.
+func (s *Set) RecursiveDifference(s2 *Set) *Set {
+	return &Set{
+		Members:  *s.Members.Difference(&s2.Members),
+		Children: *s.Children.RecursiveDifference(s2),
+	}
+}
+
+// EnsureNamedFieldsAreMembers returns a Set that contains all the
+// fields in s, as well as all the named fields that are typically not
+// included. For example, a set made of "a.b.c" will end up also owning
+// "a" if it's a named field but not "a.b" if it's a map.
+func (s *Set) EnsureNamedFieldsAreMembers(sc *schema.Schema, tr schema.TypeRef) *Set {
+	members := PathElementSet{
+		members: make(sortedPathElements, 0, s.Members.Size()+len(s.Children.members)),
+	}
+	atom, _ := sc.Resolve(tr)
+	members.members = append(members.members, s.Members.members...)
+	for _, node := range s.Children.members {
+		// Only insert named fields.
+		if node.pathElement.FieldName != nil && atom.Map != nil {
+			if _, has := atom.Map.FindField(*node.pathElement.FieldName); has {
+				members.Insert(node.pathElement)
+			}
+		}
+	}
+	return &Set{
+		Members:  members,
+		Children: *s.Children.EnsureNamedFieldsAreMembers(sc, tr),
+	}
+}
+
+// MakePrefixMatcherOrDie is the same as PrefixMatcher except it panics if parts can't be
+// turned into a SetMatcher.
+func MakePrefixMatcherOrDie(parts ...interface{}) *SetMatcher {
+	result, err := PrefixMatcher(parts...)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// PrefixMatcher creates a SetMatcher that matches all field paths prefixed by the given list of matcher path parts.
+// The matcher parts may be any of:
+//
+// - PathElementMatcher - for wildcards, `MatchAnyPathElement()` can be used as well.
+// - PathElement - for any path element
+// - value.FieldList - for listMap keys
+// - value.Value - for scalar list elements
+// - string - for field names
+// - int - for array indices
+func PrefixMatcher(parts ...interface{}) (*SetMatcher, error) {
+	current := MatchAnySet() // match all field path suffixes
+	for i := len(parts) - 1; i >= 0; i-- {
+		part := parts[i]
+		var pattern PathElementMatcher
+		switch t := part.(type) {
+		case PathElementMatcher:
+			// any path matcher, including wildcard
+			pattern = t
+		case PathElement:
+			// any path element
+			pattern = PathElementMatcher{PathElement: t}
+		case *value.FieldList:
+			// a listMap key
+			if len(*t) == 0 {
+				return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)")
+			}
+			pattern = PathElementMatcher{PathElement: PathElement{Key: t}}
+		case value.Value:
+			// a scalar or set-type list element
+			pattern = PathElementMatcher{PathElement: PathElement{Value: &t}}
+		case string:
+			// a plain field name
+			pattern = PathElementMatcher{PathElement: PathElement{FieldName: &t}}
+		case int:
+			// a plain list index
+			pattern = PathElementMatcher{PathElement: PathElement{Index: &t}}
+		default:
+			return nil, fmt.Errorf("unexpected type %T", t)
+		}
+		current = &SetMatcher{
+			members: []*SetMemberMatcher{{
+				Path:  pattern,
+				Child: current,
+			}},
+		}
+	}
+	return current, nil
+}
+
+// MatchAnyPathElement returns a PathElementMatcher that matches any path element.
+func MatchAnyPathElement() PathElementMatcher {
+	return PathElementMatcher{Wildcard: true}
+}
+
+// MatchAnySet returns a SetMatcher that matches any set.
+func MatchAnySet() *SetMatcher {
+	return &SetMatcher{wildcard: true}
+}
+
+// NewSetMatcher returns a new SetMatcher.
+// Wildcard members take precedence over non-wildcard members;
+// all non-wildcard members are ignored if there is a wildcard member.
+func NewSetMatcher(wildcard bool, members ...*SetMemberMatcher) *SetMatcher {
+	sort.Sort(sortedMemberMatcher(members))
+	return &SetMatcher{wildcard: wildcard, members: members}
+}
+
+// SetMatcher defines a matcher that matches fields in a Set.
+// SetMatcher is structured much like a Set but with wildcard support.
+type SetMatcher struct {
+	// wildcard indicates that all members and children are included in the match.
+	// If set, the members field is ignored.
+	wildcard bool
+	// members provides patterns to match the members of a Set.
+	// Wildcard members are sorted before non-wildcards and take precedence
+	// over non-wildcard members.
+	members sortedMemberMatcher
+}
+
+type sortedMemberMatcher []*SetMemberMatcher
+
+func (s sortedMemberMatcher) Len() int           { return len(s) }
+func (s sortedMemberMatcher) Less(i, j int) bool { return s[i].Path.Less(s[j].Path) }
+func (s sortedMemberMatcher) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sortedMemberMatcher) Find(p PathElementMatcher) (location int, ok bool) {
+	return sort.Find(len(s), func(i int) int {
+		return s[i].Path.Compare(p)
+	})
+}
+
+// Merge merges s and s2 and returns a SetMatcher that matches all field paths matched by either s or s2.
+// During the merge, members of s and s2 with the same PathElementMatcher are merged into a single member
+// with the children of each merged by calling this function recursively.
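+//
+// A minimal usage sketch (illustrative only):
+//
+//	a := MakePrefixMatcherOrDie("spec")
+//	b := MakePrefixMatcherOrDie("metadata", "name")
+//	m := a.Merge(b) // matches any path under .spec or under .metadata.name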
+func (s *SetMatcher) Merge(s2 *SetMatcher) *SetMatcher {
+	if s.wildcard || s2.wildcard {
+		return NewSetMatcher(true)
+	}
+	merged := make(sortedMemberMatcher, len(s.members), len(s.members)+len(s2.members))
+	copy(merged, s.members)
+	for _, m := range s2.members {
+		if i, ok := s.members.Find(m.Path); ok {
+			// since merged is a shallow copy, do not modify elements in place
+			merged[i] = &SetMemberMatcher{
+				Path:  merged[i].Path,
+				Child: merged[i].Child.Merge(m.Child),
+			}
+		} else {
+			merged = append(merged, m)
+		}
+	}
+	return NewSetMatcher(false, merged...) // sort happens here
+}
+
+// SetMemberMatcher defines a matcher that matches the members of a Set.
+// SetMemberMatcher is structured much like the elements of a SetNodeMap, but
+// with wildcard support.
+type SetMemberMatcher struct {
+	// Path provides a matcher to match members of a Set.
+	// If Path is a wildcard, all members of a Set are included in the match.
+	// Otherwise, if any Path is Equal to a member of a Set, that member is
+	// included in the match and the children of that member are matched
+	// against the Child matcher.
+	Path PathElementMatcher
+
+	// Child provides a matcher to use for the children of matched members of a Set.
+	Child *SetMatcher
+}
+
+// PathElementMatcher defines a path matcher for a PathElement.
+type PathElementMatcher struct {
+	// Wildcard indicates that all PathElements are matched by this matcher.
+	// If set, PathElement is ignored.
+	Wildcard bool
+
+	// PathElement indicates that a PathElement is matched if it is Equal
+	// to this PathElement.
+	PathElement
+}
+
+func (p PathElementMatcher) Equals(p2 PathElementMatcher) bool {
+	return p.Wildcard == p2.Wildcard && p.PathElement.Equals(p2.PathElement)
+}
+
+func (p PathElementMatcher) Less(p2 PathElementMatcher) bool {
+	if p.Wildcard && !p2.Wildcard {
+		return true
+	} else if p2.Wildcard {
+		return false
+	}
+	return p.PathElement.Less(p2.PathElement)
+}
+
+func (p PathElementMatcher) Compare(p2 PathElementMatcher) int {
+	if p.Wildcard && !p2.Wildcard {
+		return -1
+	} else if !p.Wildcard && p2.Wildcard {
+		return 1
+	}
+	return p.PathElement.Compare(p2.PathElement)
+}
+
+// FilterIncludeMatches returns a Set with only the field paths that match.
+func (s *Set) FilterIncludeMatches(pattern *SetMatcher) *Set {
+	if pattern.wildcard {
+		return s
+	}
+
+	members := PathElementSet{}
+	for _, m := range s.Members.members {
+		for _, pm := range pattern.members {
+			if pm.Path.Wildcard || pm.Path.PathElement.Equals(m) {
+				members.Insert(m)
+				break
+			}
+		}
+	}
+	return &Set{
+		Members:  members,
+		Children: *s.Children.FilterIncludeMatches(pattern),
+	}
+}
+
+// Size returns the number of members of the set.
+func (s *Set) Size() int {
+	return s.Members.Size() + s.Children.Size()
+}
+
+// Empty returns true if there are no members of the set. It is a separate
+// function from Size since it's common to check whether size > 0, and
+// potentially much faster to return as soon as a single element is found.
+func (s *Set) Empty() bool {
+	if s.Members.Size() > 0 {
+		return false
+	}
+	return s.Children.Empty()
+}
+
+// Has returns true if the field referenced by `p` is a member of the set.
+func (s *Set) Has(p Path) bool {
+	if len(p) == 0 {
+		// No one owns "the entire object"
+		return false
+	}
+	for {
+		if len(p) == 1 {
+			return s.Members.Has(p[0])
+		}
+		var ok bool
+		s, ok = s.Children.Get(p[0])
+		if !ok {
+			return false
+		}
+		p = p[1:]
+	}
+}
+
+// Equals returns true if s and s2 have exactly the same members.
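+//
+// A minimal usage sketch (illustrative only):
+//
+//	a := NewSet(MakePathOrDie("spec", "replicas"))
+//	b := NewSet(MakePathOrDie("spec", "replicas"))
+//	same := a.Equals(b) // true: both sets hold the single path .spec.replicas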
+func (s *Set) Equals(s2 *Set) bool { + return s.Members.Equals(&s2.Members) && s.Children.Equals(&s2.Children) +} + +// String returns the set one element per line. +func (s *Set) String() string { + elements := []string{} + s.Iterate(func(p Path) { + elements = append(elements, p.String()) + }) + return strings.Join(elements, "\n") +} + +// Iterate calls f once for each field that is a member of the set (preorder +// DFS). The path passed to f will be reused so make a copy if you wish to keep +// it. +func (s *Set) Iterate(f func(Path)) { + s.iteratePrefix(Path{}, f) +} + +// All iterates over each Path in the set (preorder DFS). +func (s *Set) All() iter.Seq[Path] { + return func(yield func(Path) bool) { + s.Iterate(func(p Path) { + yield(p) + }) + } +} + +func (s *Set) iteratePrefix(prefix Path, f func(Path)) { + s.Members.Iterate(func(pe PathElement) { f(append(prefix, pe)) }) + s.Children.iteratePrefix(prefix, f) +} + +// WithPrefix returns the subset of paths which begin with the given prefix, +// with the prefix not included. +func (s *Set) WithPrefix(pe PathElement) *Set { + subset, ok := s.Children.Get(pe) + if !ok { + return NewSet() + } + return subset +} + +// Leaves returns a set containing only the leaf paths +// of a set. +func (s *Set) Leaves() *Set { + leaves := PathElementSet{} + im := 0 + ic := 0 + + // any members that are not also children are leaves +outer: + for im < len(s.Members.members) { + member := s.Members.members[im] + + for ic < len(s.Children.members) { + d := member.Compare(s.Children.members[ic].pathElement) + if d == 0 { + ic++ + im++ + continue outer + } else if d < 0 { + break + } else /* if d > 0 */ { + ic++ + } + } + leaves.members = append(leaves.members, member) + im++ + } + + return &Set{ + Members: leaves, + Children: *s.Children.Leaves(), + } +} + +// setNode is a pair of PathElement / Set, for the purpose of expressing +// nested set membership. +type setNode struct { + pathElement PathElement + set *Set +} + +// SetNodeMap is a map of PathElement to subset. +type SetNodeMap struct { + members sortedSetNode +} + +type sortedSetNode []setNode + +// Implement the sort interface; this would permit bulk creation, which would +// be faster than doing it one at a time via Insert. +func (s sortedSetNode) Len() int { return len(s) } +func (s sortedSetNode) Less(i, j int) bool { return s[i].pathElement.Less(s[j].pathElement) } +func (s sortedSetNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Copy returns a copy of the SetNodeMap. +// This is not a full deep copy as any contained value.Value is not copied. +func (s *SetNodeMap) Copy() SetNodeMap { + out := make(sortedSetNode, len(s.members)) + for i, v := range s.members { + out[i] = setNode{pathElement: v.pathElement.Copy(), set: v.set.Copy()} + } + return SetNodeMap{members: out} +} + +// Descend adds pe to the set if necessary, returning the associated subset. +func (s *SetNodeMap) Descend(pe PathElement) *Set { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].pathElement.Less(pe) + }) + if loc == len(s.members) { + s.members = append(s.members, setNode{pathElement: pe, set: &Set{}}) + return s.members[loc].set + } + if s.members[loc].pathElement.Equals(pe) { + return s.members[loc].set + } + s.members = append(s.members, setNode{}) + copy(s.members[loc+1:], s.members[loc:]) + s.members[loc] = setNode{pathElement: pe, set: &Set{}} + return s.members[loc].set +} + +// Size returns the sum of the number of members of all subsets. 
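+//
+// For example (illustrative only), a set built from .a.b and .a.c has a
+// single child node "a" whose subset holds two members, so Size is 2:
+//
+//	s := NewSet(MakePathOrDie("a", "b"), MakePathOrDie("a", "c"))
+//	n := s.Children.Size() // 2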
+func (s *SetNodeMap) Size() int { + count := 0 + for _, v := range s.members { + count += v.set.Size() + } + return count +} + +// Empty returns false if there's at least one member in some child set. +func (s *SetNodeMap) Empty() bool { + for _, n := range s.members { + if !n.set.Empty() { + return false + } + } + return true +} + +// Get returns (the associated set, true) or (nil, false) if there is none. +func (s *SetNodeMap) Get(pe PathElement) (*Set, bool) { + loc := sort.Search(len(s.members), func(i int) bool { + return !s.members[i].pathElement.Less(pe) + }) + if loc == len(s.members) { + return nil, false + } + if s.members[loc].pathElement.Equals(pe) { + return s.members[loc].set, true + } + return nil, false +} + +// Equals returns true if s and s2 have the same structure (same nested +// child sets). +func (s *SetNodeMap) Equals(s2 *SetNodeMap) bool { + if len(s.members) != len(s2.members) { + return false + } + for i := range s.members { + if !s.members[i].pathElement.Equals(s2.members[i].pathElement) { + return false + } + if !s.members[i].set.Equals(s2.members[i].set) { + return false + } + } + return true +} + +// Union returns a SetNodeMap with members that appear in either s or s2. +func (s *SetNodeMap) Union(s2 *SetNodeMap) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].pathElement.Less(s2.members[j].pathElement) { + out.members = append(out.members, s.members[i]) + i++ + } else { + if !s2.members[j].pathElement.Less(s.members[i].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set.Union(s2.members[j].set)}) + i++ + } else { + out.members = append(out.members, s2.members[j]) + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + if j < len(s2.members) { + out.members = append(out.members, s2.members[j:]...) + } + return out +} + +// Intersection returns a SetNodeMap with members that appear in both s and s2. +func (s *SetNodeMap) Intersection(s2 *SetNodeMap) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.members) { + if s.members[i].pathElement.Less(s2.members[j].pathElement) { + i++ + } else { + if !s2.members[j].pathElement.Less(s.members[i].pathElement) { + res := s.members[i].set.Intersection(s2.members[j].set) + if !res.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: res}) + } + i++ + } + j++ + } + } + return out +} + +// Difference returns a SetNodeMap with members that appear in s but not in s2. +func (s *SetNodeMap) Difference(s2 *Set) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.Children.members) { + if s.members[i].pathElement.Less(s2.Children.members[j].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set}) + i++ + } else { + if !s2.Children.members[j].pathElement.Less(s.members[i].pathElement) { + + diff := s.members[i].set.Difference(s2.Children.members[j].set) + // We aren't permitted to add nodes with no elements. + if !diff.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: diff}) + } + + i++ + } + j++ + } + } + + if i < len(s.members) { + out.members = append(out.members, s.members[i:]...) + } + return out +} + +// RecursiveDifference returns a SetNodeMap with members that appear in s but not in s2. 
+// +// Compared to a regular difference, +// this removes every field **and its children** from s that is contained in s2. +// +// For example, with s containing `a.b.c` and s2 containing `a.b`, +// a RecursiveDifference will result in `a`, as the entire node `a.b` gets removed. +func (s *SetNodeMap) RecursiveDifference(s2 *Set) *SetNodeMap { + out := &SetNodeMap{} + + i, j := 0, 0 + for i < len(s.members) && j < len(s2.Children.members) { + if s.members[i].pathElement.Less(s2.Children.members[j].pathElement) { + if !s2.Members.Has(s.members[i].pathElement) { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set}) + } + i++ + } else { + if !s2.Children.members[j].pathElement.Less(s.members[i].pathElement) { + if !s2.Members.Has(s.members[i].pathElement) { + diff := s.members[i].set.RecursiveDifference(s2.Children.members[j].set) + if !diff.Empty() { + out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: diff}) + } + } + i++ + } + j++ + } + } + + if i < len(s.members) { + for _, c := range s.members[i:] { + if !s2.Members.Has(c.pathElement) { + out.members = append(out.members, c) + } + } + } + + return out +} + +// EnsureNamedFieldsAreMembers returns a set that contains all the named fields along with the leaves. +func (s *SetNodeMap) EnsureNamedFieldsAreMembers(sc *schema.Schema, tr schema.TypeRef) *SetNodeMap { + out := make(sortedSetNode, 0, s.Size()) + atom, _ := sc.Resolve(tr) + for _, member := range s.members { + tr := schema.TypeRef{} + if member.pathElement.FieldName != nil && atom.Map != nil { + tr = atom.Map.ElementType + if sf, ok := atom.Map.FindField(*member.pathElement.FieldName); ok { + tr = sf.Type + } + } else if member.pathElement.Key != nil && atom.List != nil { + tr = atom.List.ElementType + } + out = append(out, setNode{ + pathElement: member.pathElement, + set: member.set.EnsureNamedFieldsAreMembers(sc, tr), + }) + } + + return &SetNodeMap{ + members: out, + } +} + +// FilterIncludeMatches returns a SetNodeMap with only the field paths that match the matcher. +func (s *SetNodeMap) FilterIncludeMatches(pattern *SetMatcher) *SetNodeMap { + if pattern.wildcard { + return s + } + + var out sortedSetNode + for _, member := range s.members { + for _, c := range pattern.members { + if c.Path.Wildcard || c.Path.PathElement.Equals(member.pathElement) { + childSet := member.set.FilterIncludeMatches(c.Child) + if childSet.Size() > 0 { + out = append(out, setNode{ + pathElement: member.pathElement, + set: childSet, + }) + } + break + } + } + } + + return &SetNodeMap{ + members: out, + } +} + +// Iterate calls f for each PathElement in the set. +func (s *SetNodeMap) Iterate(f func(PathElement)) { + for _, n := range s.members { + f(n.pathElement) + } +} + +// All iterates over each PathElement in the set. +func (s *SetNodeMap) All() iter.Seq[PathElement] { + return func(yield func(PathElement) bool) { + s.Iterate(func(pe PathElement) { + yield(pe) + }) + } +} + +func (s *SetNodeMap) iteratePrefix(prefix Path, f func(Path)) { + for _, n := range s.members { + pe := n.pathElement + n.set.iteratePrefix(append(prefix, pe), f) + } +} + +// Leaves returns a SetNodeMap containing +// only setNodes with leaf PathElements. 
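+//
+// For example (illustrative only), via the Set.Leaves wrapper above: a set
+// holding .a and .a.b keeps only .a.b, since .a also has children:
+//
+//	s := NewSet(MakePathOrDie("a"), MakePathOrDie("a", "b"))
+//	leaves := s.Leaves() // contains only the path .a.b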
+func (s *SetNodeMap) Leaves() *SetNodeMap {
+	out := &SetNodeMap{}
+	out.members = make(sortedSetNode, len(s.members))
+	for i, n := range s.members {
+		out.members[i] = setNode{
+			pathElement: n.pathElement,
+			set:         n.set.Leaves(),
+		}
+	}
+	return out
+}
+
+// Filter defines an interface for excluding field paths from a set.
+// NewExcludeSetFilter can be used to create a filter that removes
+// specific field paths and all of their children.
+// NewIncludeMatcherFilter can be used to create a filter that removes all fields except
+// the fields that match a field path matcher. PrefixMatcher and MakePrefixMatcherOrDie
+// can be used to define field path patterns.
+type Filter interface {
+	// Filter returns a filtered copy of the set.
+	Filter(*Set) *Set
+}
+
+// NewExcludeSetFilter returns a filter that removes field paths in the exclude set.
+func NewExcludeSetFilter(exclude *Set) Filter {
+	return excludeFilter{exclude}
+}
+
+// NewExcludeFilterSetMap converts a map of APIVersion to exclude set to a map of APIVersion to exclude filters.
+func NewExcludeFilterSetMap(resetFields map[APIVersion]*Set) map[APIVersion]Filter {
+	result := make(map[APIVersion]Filter)
+	for k, v := range resetFields {
+		result[k] = excludeFilter{v}
+	}
+	return result
+}
+
+type excludeFilter struct {
+	excludeSet *Set
+}
+
+func (t excludeFilter) Filter(set *Set) *Set {
+	return set.RecursiveDifference(t.excludeSet)
+}
+
+// NewIncludeMatcherFilter returns a filter that only includes field paths that match.
+// If no matchers are provided, the filter includes all field paths.
+// PrefixMatcher and MakePrefixMatcherOrDie can help create basic matchers.
+func NewIncludeMatcherFilter(matchers ...*SetMatcher) Filter {
+	if len(matchers) == 0 {
+		return includeMatcherFilter{&SetMatcher{wildcard: true}}
+	}
+	matcher := matchers[0]
+	for i := 1; i < len(matchers); i++ {
+		matcher = matcher.Merge(matchers[i])
+	}
+
+	return includeMatcherFilter{matcher}
+}
+
+type includeMatcherFilter struct {
+	matcher *SetMatcher
+}
+
+func (pf includeMatcherFilter) Filter(set *Set) *Set {
+	return set.FilterIncludeMatches(pf.matcher)
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/merge/conflict.go
similarity index 98%
rename from e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go
rename to e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/merge/conflict.go
index f1aa258609e..dea64707b15 100644
--- a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/merge/conflict.go
@@ -21,7 +21,7 @@ import (
 	"sort"
 	"strings"
 
-	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+	"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
 )
 
 // Conflict is a conflict on a specific field with the current manager of
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/merge/update.go
similarity index 99%
rename from e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
rename to e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/merge/update.go
index 455818ff858..99d722f8724 100644
--- a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/merge/update.go
@@ -15,9 +15,10 @@ package merge
 import (
 	"fmt"
 
-	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
-	"sigs.k8s.io/structured-merge-diff/v4/typed"
-	"sigs.k8s.io/structured-merge-diff/v4/value"
+
+	"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+	"sigs.k8s.io/structured-merge-diff/v6/typed"
+	"sigs.k8s.io/structured-merge-diff/v6/value"
 )
 
 // Converter is an interface to the conversion logic. The converter
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go
new file mode 100644
index 00000000000..9081ccbc73c
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package schema defines a targeted schema language which allows one to
+// represent all the schema information necessary to perform "structured"
+// merges and diffs.
+//
+// Due to the targeted nature of the data model, the schema language can fit in
+// just a few hundred lines of go code, making it much more understandable and
+// concise than e.g. OpenAPI.
+//
+// This schema was derived by observing the API objects used by Kubernetes, and
+// formalizing a model which allows certain operations ("apply") to be more
+// well defined. It is currently missing one feature: one-of ("unions").
+package schema
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go
new file mode 100644
index 00000000000..5d3707a5b50
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go
@@ -0,0 +1,375 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"sync"
+)
+
+// Schema is a list of named types.
+//
+// Schema types are indexed in a map before the first search so this type
+// should be considered immutable.
+type Schema struct {
+	Types []TypeDef `yaml:"types,omitempty"`
+
+	once sync.Once
+	m    map[string]TypeDef
+
+	lock sync.Mutex
+	// Cached results of resolving type references to atoms. Only stores
+	// type references which require fields of Atom to be overridden.
+	resolvedTypes map[TypeRef]Atom
+}
+
+// A TypeSpecifier references a particular type in a schema.
+type TypeSpecifier struct {
+	Type   TypeRef `yaml:"type,omitempty"`
+	Schema Schema  `yaml:"schema,omitempty"`
+}
+
+// TypeDef represents a named type in a schema.
+type TypeDef struct {
+	// Top level types should be named. Every type must have a unique name.
+	Name string `yaml:"name,omitempty"`
+
+	Atom `yaml:"atom,omitempty,inline"`
+}
+
+// TypeRef either refers to a named type or declares an inlined type.
+type TypeRef struct {
+	// Either the name or one member of Atom should be set.
+	NamedType *string `yaml:"namedType,omitempty"`
+	Inlined   Atom    `yaml:",inline,omitempty"`
+
+	// If this reference refers to a map-type or list-type, this field overrides
+	// the `ElementRelationship` of the referred type when resolved.
+	// If this field is nil, then it has no effect.
+	// See `Map` and `List` for more information about `ElementRelationship`
+	ElementRelationship *ElementRelationship `yaml:"elementRelationship,omitempty"`
+}
+
+// Atom represents the smallest possible pieces of the type system.
+// Each set field in the Atom represents a possible type for the object.
+// If none of the fields are set, any object will fail validation against the atom.
+type Atom struct {
+	*Scalar `yaml:"scalar,omitempty"`
+	*List   `yaml:"list,omitempty"`
+	*Map    `yaml:"map,omitempty"`
+}
+
+// Scalar (AKA "primitive") represents a type which has a single value which is
+// either numeric, string, or boolean, or untyped for any of them.
+//
+// TODO: split numeric into float/int? Something even more fine-grained?
+type Scalar string
+
+const (
+	Numeric = Scalar("numeric")
+	String  = Scalar("string")
+	Boolean = Scalar("boolean")
+	Untyped = Scalar("untyped")
+)
+
+// ElementRelationship is an enum of the different possible relationships
+// between the elements of container types (maps, lists).
+type ElementRelationship string
+
+const (
+	// Associative only applies to lists (see the documentation there).
+	Associative = ElementRelationship("associative")
+	// Atomic makes container types (lists, maps) behave
+	// as scalars / leaf fields
+	Atomic = ElementRelationship("atomic")
+	// Separable means the items of the container type have no particular
+	// relationship (default behavior for maps).
+	Separable = ElementRelationship("separable")
+)
+
+// Map is a key-value pair. Its default semantics are the same as an
+// associative list, but:
+// - It is serialized differently:
+//     map:  {"k": {"value": "v"}}
+//     list: [{"key": "k", "value": "v"}]
+// - Keys must be string typed.
+// - Keys can't have multiple components.
+//
+// Optionally, maps may be atomic (for example, imagine representing an RGB
+// color value--it doesn't make sense to have different actors own the R and G
+// values).
+//
+// Maps may also represent a type which is composed of a number of different fields.
+// Each field has a name and a type.
+//
+// Fields are indexed in a map before the first search so this type
+// should be considered immutable.
+type Map struct {
+	// Each struct field appears exactly once in this list. The order in
+	// this list defines the canonical field ordering.
+	Fields []StructField `yaml:"fields,omitempty"`
+
+	// A Union is a grouping of fields with special rules. It may refer to
+	// one or more fields in the above list. A given field from the above
+	// list may be referenced in exactly 0 or 1 places in the below list.
+	// One can have multiple unions in the same struct, but the fields can't
+	// overlap between unions.
+	Unions []Union `yaml:"unions,omitempty"`
+
+	// ElementType is the type of the struct's unknown fields.
+	ElementType TypeRef `yaml:"elementType,omitempty"`
+
+	// ElementRelationship states the relationship between the map's items.
+	// * `separable` (or unset) implies that each element is 100% independent.
+	// * `atomic` implies that all elements depend on each other, and this
+	//   is effectively a scalar / leaf field; it doesn't make sense for
+	//   separate actors to set the elements. Example: an RGB color struct;
+	//   it would never make sense to "own" only one component of the
+	//   color.
+	// The default behavior for maps is `separable`; it's permitted to
+	// leave this unset to get the default behavior.
+	ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"`
+
+	once sync.Once
+	m    map[string]StructField
+}
+
+// FindField is a convenience function that returns the referenced StructField,
+// if it exists, or (nil, false) if it doesn't.
+func (m *Map) FindField(name string) (StructField, bool) {
+	m.once.Do(func() {
+		m.m = make(map[string]StructField, len(m.Fields))
+		for _, field := range m.Fields {
+			m.m[field.Name] = field
+		}
+	})
+	sf, ok := m.m[name]
+	return sf, ok
+}
+
+// CopyInto this instance of Map into the other
+// If other is nil this method does nothing.
+// If other is already initialized, overwrites it with this instance
+// Warning: Not thread safe
+func (m *Map) CopyInto(dst *Map) {
+	if dst == nil {
+		return
+	}
+
+	// Map type is considered immutable so sharing references
+	dst.Fields = m.Fields
+	dst.ElementType = m.ElementType
+	dst.Unions = m.Unions
+	dst.ElementRelationship = m.ElementRelationship
+
+	if m.m != nil {
+		// If cache is non-nil then the once token had been consumed.
+		// Must reset token and use it again to ensure same semantics.
+		dst.once = sync.Once{}
+		dst.once.Do(func() {
+			dst.m = m.m
+		})
+	}
+}
+
+// UnionField is a mapping between a field that is part of the union and
+// its discriminated value. The discriminated value has to be set, and
+// should not conflict with other discriminated values in the list.
+type UnionField struct {
+	// FieldName is the name of the field that is part of the union. This
+	// is the serialized form of the field.
+	FieldName string `yaml:"fieldName"`
+	// DiscriminatorValue is the value of the discriminator to
+	// select that field. If the union doesn't have a discriminator,
+	// this field is ignored.
+	DiscriminatorValue string `yaml:"discriminatorValue"`
+}
+
+// Union, or oneof, means that only one of multiple fields of a structure can be
+// set at a time. Setting the discriminator helps clear other fields:
+// - If discriminator changed to non-nil, and a new field has been added
+//   that doesn't match, an error is returned,
+// - If discriminator hasn't changed and two fields or more are set, an
+//   error is returned,
+// - If discriminator changed to non-nil, all other fields but the
+//   discriminated one will be cleared,
+// - Otherwise, if only one field is left, update discriminator to that value.
+type Union struct {
+	// Discriminator, if present, is the name of the field that
+	// discriminates fields in the union. The mapping between the value of
+	// the discriminator and the field is done by using the Fields list
+	// below.
+	Discriminator *string `yaml:"discriminator,omitempty"`
+
+	// DeduceInvalidDiscriminator indicates if the discriminator
+	// should be updated automatically based on the fields set. This
+	// typically defaults to false since we don't want to deduce by
+	// default (the behavior exists to maintain compatibility on
+	// existing types and shouldn't be used for new types).
+	DeduceInvalidDiscriminator bool `yaml:"deduceInvalidDiscriminator,omitempty"`
+
+	// This is the list of fields that belong to this union. All the
+	// fields present in here have to be part of the parent
+	// structure. Discriminator (if oneOf has one), is NOT included in
+	// this list. The value for field is how we map the name of the field
+	// to actual value for discriminator.
+	Fields []UnionField `yaml:"fields,omitempty"`
+}
+
+// StructField pairs a field name with a field type.
+type StructField struct {
+	// Name is the field name.
+	Name string `yaml:"name,omitempty"`
+	// Type is the field type.
+	Type TypeRef `yaml:"type,omitempty"`
+	// Default value for the field, nil if not present.
+	Default interface{} `yaml:"default,omitempty"`
+}
+
+// List represents a type which contains zero or more elements, all of the
+// same subtype. Lists may be either associative: each element is more or less
+// independent and could be managed by separate entities in the system; or
+// atomic, where the elements are heavily dependent on each other: it is not
+// sensible to change one element without considering the ramifications on all
+// the other elements.
+type List struct {
+	// ElementType is the type of the list's elements.
+	ElementType TypeRef `yaml:"elementType,omitempty"`
+
+	// ElementRelationship states the relationship between the list's elements
+	// and must have one of these values:
+	// * `atomic`: the list is treated as a single entity, like a scalar.
+	// * `associative`:
+	//   - If the list element is a scalar, the list is treated as a set.
+	//   - If the list element is a map, the list is treated as a map.
+	// There is no default for this value for lists; all schemas must
+	// explicitly state the element relationship for all lists.
+	ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"`
+
+	// Iff ElementRelationship is `associative`, and the element type is
+	// map, then Keys must have non-zero length, and it lists the fields
+	// of the element's map type which are to be used as the keys of the
+	// list.
+	//
+	// TODO: change this to "non-atomic struct" above and make the code reflect this.
+	//
+	// Each key must refer to a single field name (no nesting, not JSONPath).
+	Keys []string `yaml:"keys,omitempty"`
+}
+
+// FindNamedType is a convenience function that returns the referenced TypeDef,
+// if it exists, or (nil, false) if it doesn't.
+func (s *Schema) FindNamedType(name string) (TypeDef, bool) {
+	s.once.Do(func() {
+		s.m = make(map[string]TypeDef, len(s.Types))
+		for _, t := range s.Types {
+			s.m[t.Name] = t
+		}
+	})
+	t, ok := s.m[name]
+	return t, ok
+}
+
+func (s *Schema) resolveNoOverrides(tr TypeRef) (Atom, bool) {
+	result := Atom{}
+
+	if tr.NamedType != nil {
+		t, ok := s.FindNamedType(*tr.NamedType)
+		if !ok {
+			return Atom{}, false
+		}
+
+		result = t.Atom
+	} else {
+		result = tr.Inlined
+	}
+
+	return result, true
+}
+
+// Resolve is a convenience function which returns the atom referenced, whether
+// it is inline or named. Returns (Atom{}, false) if the type can't be resolved.
+//
+// This allows callers to not care about the difference between a (possibly
+// inlined) reference and a definition.
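+//
+// A minimal usage sketch (illustrative only; "myType" is a hypothetical name):
+//
+//	name := "myType"
+//	atom, ok := s.Resolve(TypeRef{NamedType: &name})
+//	if ok && atom.Map != nil {
+//		// the reference resolved to a map/struct atom
+//	}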
+func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { + // If this is a plain reference with no overrides, just return the type + if tr.ElementRelationship == nil { + return s.resolveNoOverrides(tr) + } + + s.lock.Lock() + defer s.lock.Unlock() + + if s.resolvedTypes == nil { + s.resolvedTypes = make(map[TypeRef]Atom) + } + + var result Atom + var exists bool + + // Return cached result if available + // If not, calculate result and cache it + if result, exists = s.resolvedTypes[tr]; !exists { + if result, exists = s.resolveNoOverrides(tr); exists { + // Allow field-level electives to override the referred type's modifiers + switch { + case result.Map != nil: + mapCopy := Map{} + result.Map.CopyInto(&mapCopy) + mapCopy.ElementRelationship = *tr.ElementRelationship + result.Map = &mapCopy + case result.List != nil: + listCopy := *result.List + listCopy.ElementRelationship = *tr.ElementRelationship + result.List = &listCopy + case result.Scalar != nil: + return Atom{}, false + default: + return Atom{}, false + } + } else { + return Atom{}, false + } + + // Save result. If it is nil, that is also recorded as not existing. + s.resolvedTypes[tr] = result + } + + return result, true +} + +// Clones this instance of Schema into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (s *Schema) CopyInto(dst *Schema) { + if dst == nil { + return + } + + // Schema type is considered immutable so sharing references + dst.Types = s.Types + + if s.m != nil { + // If cache is non-nil then the once token had been consumed. + // Must reset token and use it again to ensure same semantics. + dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = s.m + }) + } +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go new file mode 100644 index 00000000000..b668eff8339 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go @@ -0,0 +1,202 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import "reflect" + +// Equals returns true iff the two Schemas are equal. +func (a *Schema) Equals(b *Schema) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + + if len(a.Types) != len(b.Types) { + return false + } + for i := range a.Types { + if !a.Types[i].Equals(&b.Types[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two TypeRefs are equal. +// +// Note that two typerefs that have an equivalent type but where one is +// inlined and the other is named, are not considered equal. 
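+//
+// For example (illustrative only):
+//
+//	n, sc := "foo", String
+//	named := TypeRef{NamedType: &n}
+//	inlined := TypeRef{Inlined: Atom{Scalar: &sc}}
+//	eq := named.Equals(&inlined) // false, even if "foo" names the same scalar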
+func (a *TypeRef) Equals(b *TypeRef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.NamedType == nil) != (b.NamedType == nil) { + return false + } + if a.NamedType != nil { + if *a.NamedType != *b.NamedType { + return false + } + //return true + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + return a.Inlined.Equals(&b.Inlined) +} + +// Equals returns true iff the two TypeDefs are equal. +func (a *TypeDef) Equals(b *TypeDef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + return a.Atom.Equals(&b.Atom) +} + +// Equals returns true iff the two Atoms are equal. +func (a *Atom) Equals(b *Atom) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Scalar == nil) != (b.Scalar == nil) { + return false + } + if (a.List == nil) != (b.List == nil) { + return false + } + if (a.Map == nil) != (b.Map == nil) { + return false + } + switch { + case a.Scalar != nil: + return *a.Scalar == *b.Scalar + case a.List != nil: + return a.List.Equals(b.List) + case a.Map != nil: + return a.Map.Equals(b.Map) + } + return true +} + +// Equals returns true iff the two Maps are equal. +func (a *Map) Equals(b *Map) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + if len(a.Unions) != len(b.Unions) { + return false + } + for i := range a.Unions { + if !a.Unions[i].Equals(&b.Unions[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two Unions are equal. +func (a *Union) Equals(b *Union) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Discriminator == nil) != (b.Discriminator == nil) { + return false + } + if a.Discriminator != nil { + if *a.Discriminator != *b.Discriminator { + return false + } + } + if a.DeduceInvalidDiscriminator != b.DeduceInvalidDiscriminator { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two UnionFields are equal. +func (a *UnionField) Equals(b *UnionField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.FieldName != b.FieldName { + return false + } + if a.DiscriminatorValue != b.DiscriminatorValue { + return false + } + return true +} + +// Equals returns true iff the two StructFields are equal. +func (a *StructField) Equals(b *StructField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + if !reflect.DeepEqual(a.Default, b.Default) { + return false + } + return a.Type.Equals(&b.Type) +} + +// Equals returns true iff the two Lists are equal. 
+func (a *List) Equals(b *List) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Keys) != len(b.Keys) { + return false + } + for i := range a.Keys { + if a.Keys[i] != b.Keys[i] { + return false + } + } + return true +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go new file mode 100644 index 00000000000..6eb6c36df30 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// SchemaSchemaYAML is a schema against which you can validate other schemas. +// It will validate itself. It can be unmarshalled into a Schema type. +var SchemaSchemaYAML = `types: +- name: schema + map: + fields: + - name: types + type: + list: + elementRelationship: associative + elementType: + namedType: typeDef + keys: + - name +- name: typeDef + map: + fields: + - name: name + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped +- name: typeRef + map: + fields: + - name: namedType + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped + - name: elementRelationship + type: + scalar: string +- name: scalar + scalar: string +- name: map + map: + fields: + - name: fields + type: + list: + elementType: + namedType: structField + elementRelationship: associative + keys: [ "name" ] + - name: unions + type: + list: + elementType: + namedType: union + elementRelationship: atomic + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string +- name: unionField + map: + fields: + - name: fieldName + type: + scalar: string + - name: discriminatorValue + type: + scalar: string +- name: union + map: + fields: + - name: discriminator + type: + scalar: string + - name: deduceInvalidDiscriminator + type: + scalar: boolean + - name: fields + type: + list: + elementRelationship: associative + elementType: + namedType: unionField + keys: + - fieldName +- name: structField + map: + fields: + - name: name + type: + scalar: string + - name: type + type: + namedType: typeRef + - name: default + type: + namedType: __untyped_atomic_ +- name: list + map: + fields: + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string + - name: keys + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: untyped + map: + fields: + - name: elementRelationship + type: + scalar: string +- name: __untyped_atomic_ + scalar: untyped + list: + 
elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+  map:
+    elementType:
+      namedType: __untyped_atomic_
+    elementRelationship: atomic
+`
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/compare.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/compare.go
new file mode 100644
index 00000000000..488251f6472
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/compare.go
@@ -0,0 +1,470 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+	"sigs.k8s.io/structured-merge-diff/v6/schema"
+	"sigs.k8s.io/structured-merge-diff/v6/value"
+)
+
+// Comparison is the return value of a TypedValue.Compare() operation.
+//
+// No field will appear in more than one of the three fieldsets. If all of the
+// fieldsets are empty, then the objects must have been equal.
+type Comparison struct {
+	// Removed contains any fields removed by rhs (the right-hand-side
+	// object in the comparison).
+	Removed *fieldpath.Set
+	// Modified contains fields present in both objects but different.
+	Modified *fieldpath.Set
+	// Added contains any fields added by rhs.
+	Added *fieldpath.Set
+}
+
+// IsSame returns true if the comparison returned no changes (the two
+// compared objects are equivalent).
+func (c *Comparison) IsSame() bool {
+	return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty()
+}
+
+// String returns a human readable version of the comparison.
+func (c *Comparison) String() string {
+	bld := strings.Builder{}
+	if !c.Modified.Empty() {
+		bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified))
+	}
+	if !c.Added.Empty() {
+		bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added))
+	}
+	if !c.Removed.Empty() {
+		bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed))
+	}
+	return bld.String()
+}
+
+// ExcludeFields recursively removes the given fields from every part of the
+// comparison.
+func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison {
+	if fields == nil || fields.Empty() {
+		return c
+	}
+	c.Removed = c.Removed.RecursiveDifference(fields)
+	c.Modified = c.Modified.RecursiveDifference(fields)
+	c.Added = c.Added.RecursiveDifference(fields)
+	return c
+}
+
+// FilterFields applies the given filter to each of the three fieldsets.
+func (c *Comparison) FilterFields(filter fieldpath.Filter) *Comparison {
+	if filter == nil {
+		return c
+	}
+	c.Removed = filter.Filter(c.Removed)
+	c.Modified = filter.Filter(c.Modified)
+	c.Added = filter.Filter(c.Added)
+	return c
+}
+
+type compareWalker struct {
+	lhs     value.Value
+	rhs     value.Value
+	schema  *schema.Schema
+	typeRef schema.TypeRef
+
+	// Current path that we are comparing
+	path fieldpath.Path
+
+	// Resulting comparison.
+	comparison *Comparison
+
+	// internal housekeeping--don't set when constructing.
+	inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list
+
+	// Allocate only as many walkers as needed for the depth by storing them here.
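+	// Reusing walkers avoids allocating a fresh walker for every descent.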
+	spareWalkers *[]*compareWalker
+
+	allocator value.Allocator
+}
+
+// compare walks lhs and rhs together, recording every added, removed, and
+// modified field path in w.comparison.
+func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) {
+	if w.lhs == nil && w.rhs == nil {
+		// check this condition here instead of everywhere below.
+		return errorf("at least one of lhs and rhs must be provided")
+	}
+	a, ok := w.schema.Resolve(w.typeRef)
+	if !ok {
+		return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType)
+	}
+
+	alhs := deduceAtom(a, w.lhs)
+	arhs := deduceAtom(a, w.rhs)
+
+	// deduceAtom does not fix the type for nil values
+	// nil is a wildcard and will accept whatever form the other operand takes
+	if w.rhs == nil {
+		errs = append(errs, handleAtom(alhs, w.typeRef, w)...)
+	} else if w.lhs == nil || alhs.Equals(&arhs) {
+		errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+	} else {
+		w2 := *w
+		errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...)
+		errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+	}
+
+	if !w.inLeaf {
+		if w.lhs == nil {
+			w.comparison.Added.Insert(w.path)
+		} else if w.rhs == nil {
+			w.comparison.Removed.Insert(w.path)
+		}
+	}
+	return errs.WithLazyPrefix(prefixFn)
+}
+
+// doLeaf should be called on leaves before descending into children, if there
+// will be a descent. It modifies w.inLeaf.
+func (w *compareWalker) doLeaf() {
+	if w.inLeaf {
+		// We're in a "big leaf", an atomic map or list. Ignore
+		// subsequent leaves.
+		return
+	}
+	w.inLeaf = true
+
+	// We don't recurse into leaf fields when comparing.
+	if w.lhs == nil {
+		w.comparison.Added.Insert(w.path)
+	} else if w.rhs == nil {
+		w.comparison.Removed.Insert(w.path)
+	} else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) {
+		// TODO: Equality is not sufficient for this.
+		// Need to implement equality check on the value type.
+		w.comparison.Modified.Insert(w.path)
+	}
+}
+
+func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors {
+	// Make sure at least one side is a valid scalar.
+	lerrs := validateScalar(t, w.lhs, "lhs: ")
+	rerrs := validateScalar(t, w.rhs, "rhs: ")
+	if len(lerrs) > 0 && len(rerrs) > 0 {
+		return append(lerrs, rerrs...)
+	}
+
+	// All scalars are leaf fields.
+	w.doLeaf()
+
+	return nil
+}
+
+func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker {
+	if w.spareWalkers == nil {
+		// first descent.
+		w.spareWalkers = &[]*compareWalker{}
+	}
+	var w2 *compareWalker
+	if n := len(*w.spareWalkers); n > 0 {
+		w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1]
+	} else {
+		w2 = &compareWalker{}
+	}
+	*w2 = *w
+	w2.typeRef = tr
+	w2.path = append(w2.path, pe)
+	w2.lhs = nil
+	w2.rhs = nil
+	w2.comparison = cmp
+	return w2
+}
+
+func (w *compareWalker) finishDescent(w2 *compareWalker) {
+	// if the descent caused a realloc, ensure that we reuse the buffer
+	// for the next sibling.
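+	// Truncating w2.path by one element drops the PathElement appended in
+	// prepareDescent while keeping the backing array for reuse.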
+ w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. + return + // Normal use-case: + // We have no duplicates for this PE, compare items one-to-one. + case len(lList) <= 1 && len(rList) <= 1: + lValue := value.Value(nil) + if len(lList) != 0 { + lValue = lList[0] + } + rValue := value.Value(nil) + if len(rList) != 0 { + rValue = rList[0] + } + errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...) + // Duplicates before & after use-case: + // Compare the duplicates lists as if they were atomic, mark modified if they changed. 
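+		// No per-item recursion happens in this case; the two runs of
+		// duplicates are compared wholesale, element by element.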
+		case len(lList) >= 2 && len(rList) >= 2:
+			listEqual := func(lList, rList []value.Value) bool {
+				if len(lList) != len(rList) {
+					return false
+				}
+				for i := range lList {
+					if !value.Equals(lList[i], rList[i]) {
+						return false
+					}
+				}
+				return true
+			}
+			if !listEqual(lList, rList) {
+				w.comparison.Modified.Insert(append(w.path, pe))
+			}
+		// Duplicates before & not anymore use-case:
+		// Recursively add the new non-duplicate item, then remove the duplicate marker.
+		case len(lList) >= 2:
+			if len(rList) != 0 {
+				errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...)
+			}
+			w.comparison.Removed.Insert(append(w.path, pe))
+		// New duplicates use-case:
+		// Recursively remove the old non-duplicate item, then add the duplicate marker.
+		case len(rList) >= 2:
+			if len(lList) != 0 {
+				errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...)
+			}
+			w.comparison.Added.Insert(append(w.path, pe))
+		}
+	}
+
+	return
+}
+
+func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
+	var errs ValidationErrors
+	length := 0
+	if list != nil {
+		length = list.Length()
+	}
+	observed := fieldpath.MakePathElementValueMap(length)
+	pes := make([]fieldpath.PathElement, 0, length)
+	for i := 0; i < length; i++ {
+		child := list.At(i)
+		pe, err := listItemToPathElement(w.allocator, w.schema, t, child)
+		if err != nil {
+			errs = append(errs, errorf("element %v: %v", i, err.Error())...)
+			// If we can't construct the path element, we can't
+			// even report errors deeper in the schema, so bail on
+			// this element.
+			continue
+		}
+		// Ignore repeated occurrences of `pe`.
+		if _, found := observed.Get(pe); found {
+			continue
+		}
+		observed.Insert(pe, child)
+		pes = append(pes, pe)
+	}
+	return pes, observed, errs
+}
+
+func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors {
+	w2 := w.prepareDescent(pe, t.ElementType, w.comparison)
+	w2.lhs = lChild
+	w2.rhs = rChild
+	errs := w2.compare(pe.String)
+	w.finishDescent(w2)
+	return errs
+}
+
+func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) {
+	if v == nil {
+		return nil, nil
+	}
+	l, err := listValue(w.allocator, v)
+	if err != nil {
+		return nil, errorf("%v: %v", prefix, err)
+	}
+	return l, nil
+}
+
+func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) {
+	lhs, _ := w.derefList("lhs: ", w.lhs)
+	if lhs != nil {
+		defer w.allocator.Free(lhs)
+	}
+	rhs, _ := w.derefList("rhs: ", w.rhs)
+	if rhs != nil {
+		defer w.allocator.Free(rhs)
+	}
+
+	// If both lhs and rhs are empty/null, treat it as a
+	// leaf: this helps preserve the empty/null
+	// distinction.
+	emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0)
+
+	if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf {
+		w.doLeaf()
+		return nil
+	}
+
+	if lhs == nil && rhs == nil {
+		return nil
+	}
+
+	errs = w.visitListItems(t, lhs, rhs)
+
+	return errs
+}
+
+func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) {
+	fieldType := t.ElementType
+	if sf, ok := t.FindField(key); ok {
+		fieldType = sf.Type
+	}
+	pe := fieldpath.PathElement{FieldName: &key}
+	w2 := w.prepareDescent(pe, fieldType, w.comparison)
+	w2.lhs = lhs
+	w2.rhs = rhs
+	errs = append(errs, w2.compare(pe.String)...)
+ w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/e2e/vendor/k8s.io/component-base/config/doc.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/doc.go similarity index 86% rename from e2e/vendor/k8s.io/component-base/config/doc.go rename to e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/doc.go index a06a258eb3f..ca4e60542af 100644 --- a/e2e/vendor/k8s.io/component-base/config/doc.go +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/doc.go @@ -14,6 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package - -package config +// Package typed contains logic for operating on values with given schemas. +package typed diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/helpers.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/helpers.go new file mode 100644 index 00000000000..8a9c0b50e6a --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/helpers.go @@ -0,0 +1,266 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "errors" + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v6/fieldpath" + "sigs.k8s.io/structured-merge-diff/v6/schema" + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +// ValidationError reports an error about a particular field +type ValidationError struct { + Path string + ErrorMessage string +} + +// Error returns a human readable error message. +func (ve ValidationError) Error() string { + if len(ve.Path) == 0 { + return ve.ErrorMessage + } + return fmt.Sprintf("%s: %v", ve.Path, ve.ErrorMessage) +} + +// ValidationErrors accumulates multiple validation error messages. +type ValidationErrors []ValidationError + +// Error returns a human readable error message reporting each error in the +// list. 
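+// A single error is rendered bare; multiple errors are listed one per line
+// under an "errors:" header.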
+func (errs ValidationErrors) Error() string {
+	if len(errs) == 1 {
+		return errs[0].Error()
+	}
+	messages := []string{"errors:"}
+	for _, e := range errs {
+		messages = append(messages, "  "+e.Error())
+	}
+	return strings.Join(messages, "\n")
+}
+
+// WithPath sets the given path on all the validation errors.
+func (errs ValidationErrors) WithPath(p string) ValidationErrors {
+	for i := range errs {
+		errs[i].Path = p
+	}
+	return errs
+}
+
+// WithPrefix prefixes all errors path with the given pathelement. This
+// is useful when unwinding the stack on errors.
+func (errs ValidationErrors) WithPrefix(prefix string) ValidationErrors {
+	for i := range errs {
+		errs[i].Path = prefix + errs[i].Path
+	}
+	return errs
+}
+
+// WithLazyPrefix prefixes all errors path with the given pathelement.
+// This is useful when unwinding the stack on errors. Prefix is
+// computed lazily only if there is an error.
+func (errs ValidationErrors) WithLazyPrefix(fn func() string) ValidationErrors {
+	if len(errs) == 0 {
+		return errs
+	}
+	prefix := ""
+	if fn != nil {
+		prefix = fn()
+	}
+	for i := range errs {
+		errs[i].Path = prefix + errs[i].Path
+	}
+	return errs
+}
+
+func errorf(format string, args ...interface{}) ValidationErrors {
+	return ValidationErrors{{
+		ErrorMessage: fmt.Sprintf(format, args...),
+	}}
+}
+
+type atomHandler interface {
+	doScalar(*schema.Scalar) ValidationErrors
+	doList(*schema.List) ValidationErrors
+	doMap(*schema.Map) ValidationErrors
+}
+
+func resolveSchema(s *schema.Schema, tr schema.TypeRef, v value.Value, ah atomHandler) ValidationErrors {
+	a, ok := s.Resolve(tr)
+	if !ok {
+		typeName := "inlined type"
+		if tr.NamedType != nil {
+			typeName = *tr.NamedType
+		}
+		return errorf("schema error: no type found matching: %v", typeName)
+	}
+
+	a = deduceAtom(a, v)
+	return handleAtom(a, tr, ah)
+}
+
+// deduceAtom determines which of the possible types in atom 'atom' applies to value 'val'.
+// If val is of a type allowed by atom, return a copy of atom with all other types set to nil.
+// If val is nil, or is not of a type allowed by atom, just return the original atom;
+// validation will then fail at a later stage with a more useful error.
+func deduceAtom(atom schema.Atom, val value.Value) schema.Atom {
+	switch {
+	case val == nil:
+	case val.IsFloat(), val.IsInt(), val.IsString(), val.IsBool():
+		if atom.Scalar != nil {
+			return schema.Atom{Scalar: atom.Scalar}
+		}
+	case val.IsList():
+		if atom.List != nil {
+			return schema.Atom{List: atom.List}
+		}
+	case val.IsMap():
+		if atom.Map != nil {
+			return schema.Atom{Map: atom.Map}
+		}
+	}
+	return atom
+}
+
+func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErrors {
+	switch {
+	case a.Map != nil:
+		return ah.doMap(a.Map)
+	case a.Scalar != nil:
+		return ah.doScalar(a.Scalar)
+	case a.List != nil:
+		return ah.doList(a.List)
+	}
+
+	name := "inlined"
+	if tr.NamedType != nil {
+		name = "named type: " + *tr.NamedType
+	}
+
+	return errorf("schema error: invalid atom: %v", name)
+}
+
+// Returns the list, or an error. Reminder: nil is a valid list and might be returned.
+func listValue(a value.Allocator, val value.Value) (value.List, error) {
+	if val.IsNull() {
+		// Null is a valid list.
+		return nil, nil
+	}
+	if !val.IsList() {
+		return nil, fmt.Errorf("expected list, got %v", val)
+	}
+	return val.AsListUsing(a), nil
+}
+
+// Returns the map, or an error. Reminder: nil is a valid map and might be returned.
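+// An explicit null resolves to (nil, nil), while a Go nil value is an error.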
+func mapValue(a value.Allocator, val value.Value) (value.Map, error) {
+	if val == nil {
+		return nil, fmt.Errorf("expected map, got nil")
+	}
+	if val.IsNull() {
+		// Null is a valid map.
+		return nil, nil
+	}
+	if !val.IsMap() {
+		return nil, fmt.Errorf("expected map, got %v", val)
+	}
+	return val.AsMapUsing(a), nil
+}
+
+func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName string) (interface{}, error) {
+	atom, ok := s.Resolve(list.ElementType)
+	if !ok {
+		return nil, errors.New("invalid elementType for list")
+	}
+	if atom.Map == nil {
+		return nil, errors.New("associative list may not have non-map types")
+	}
+	// If the field is not found, we can assume there is no default.
+	field, _ := atom.Map.FindField(fieldName)
+	return field.Default, nil
+}
+
+func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) {
+	pe := fieldpath.PathElement{}
+	if child.IsNull() {
+		// null entries are illegal.
+		return pe, errors.New("associative list with keys may not have a null element")
+	}
+	if !child.IsMap() {
+		return pe, errors.New("associative list with keys may not have non-map elements")
+	}
+	keyMap := value.FieldList{}
+	m := child.AsMapUsing(a)
+	defer a.Free(m)
+	for _, fieldName := range list.Keys {
+		if val, ok := m.Get(fieldName); ok {
+			keyMap = append(keyMap, value.Field{Name: fieldName, Value: val})
+		} else if def, err := getAssociativeKeyDefault(s, list, fieldName); err != nil {
+			return pe, fmt.Errorf("couldn't find default value for %v: %v", fieldName, err)
+		} else if def != nil {
+			keyMap = append(keyMap, value.Field{Name: fieldName, Value: value.NewValueInterface(def)})
+		} else {
+			// Don't add the key to the key field list: a key field
+			// list that did include it would represent a different
+			// entry in the associative list.
+		}
+	}
+
+	if len(list.Keys) > 0 && len(keyMap) == 0 {
+		return pe, fmt.Errorf("associative list with keys has an element that omits all key fields %q (and doesn't have default values for any key fields)", list.Keys)
+	}
+
+	keyMap.Sort()
+	pe.Key = &keyMap
+	return pe, nil
+}
+
+func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) {
+	pe := fieldpath.PathElement{}
+	switch {
+	case child.IsMap():
+		// TODO: atomic maps should be acceptable.
+		return pe, errors.New("associative list without keys has an element that's a map type")
+	case child.IsList():
+		// Should we support a set of lists? For the moment
+		// let's say we don't.
+		// TODO: atomic lists should be acceptable.
+		return pe, errors.New("not supported: associative list with lists as elements")
+	case child.IsNull():
+		return pe, errors.New("associative list without keys has an element that's an explicit null")
+	default:
+		// We are a set type.
+		pe.Value = &child
+		return pe, nil
+	}
+}
+
+func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) {
+	if list.ElementRelationship != schema.Associative {
+		return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list")
+	}
+
+	if len(list.Keys) > 0 {
+		return keyedAssociativeListItemToPathElement(a, s, list, child)
+	}
+
+	// If there are no keys, then we must be a set of primitives.
+	return setItemToPathElement(child)
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/merge.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/merge.go
new file mode 100644
index 00000000000..f8ca9aba82f
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/merge.go
@@ -0,0 +1,427 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+	"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
+	"sigs.k8s.io/structured-merge-diff/v6/schema"
+	"sigs.k8s.io/structured-merge-diff/v6/value"
+)
+
+type mergingWalker struct {
+	lhs     value.Value
+	rhs     value.Value
+	schema  *schema.Schema
+	typeRef schema.TypeRef
+
+	// Current path that we are merging
+	path fieldpath.Path
+
+	// How to merge. Called after schema validation for all leaf fields.
+	rule mergeRule
+
+	// If set, called after non-leaf items have been merged. (`out` is
+	// probably already set.)
+	postItemHook mergeRule
+
+	// output of the merge operation (nil if none)
+	out *interface{}
+
+	// internal housekeeping--don't set when constructing.
+	inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list
+
+	// Allocate only as many walkers as needed for the depth by storing them here.
+	spareWalkers *[]*mergingWalker
+
+	allocator value.Allocator
+}
+
+// merge rules examine w.lhs and w.rhs (up to one of which may be nil) and
+// optionally set w.out. If lhs and rhs are both set, they will be of
+// comparable type.
+type mergeRule func(w *mergingWalker)
+
+var (
+	ruleKeepRHS = mergeRule(func(w *mergingWalker) {
+		if w.rhs != nil {
+			v := w.rhs.Unstructured()
+			w.out = &v
+		} else if w.lhs != nil {
+			v := w.lhs.Unstructured()
+			w.out = &v
+		}
+	})
+)
+
+// merge sets w.out.
+func (w *mergingWalker) merge(prefixFn func() string) (errs ValidationErrors) {
+	if w.lhs == nil && w.rhs == nil {
+		// check this condition here instead of everywhere below.
+		return errorf("at least one of lhs and rhs must be provided")
+	}
+	a, ok := w.schema.Resolve(w.typeRef)
+	if !ok {
+		return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType)
+	}
+
+	alhs := deduceAtom(a, w.lhs)
+	arhs := deduceAtom(a, w.rhs)
+
+	// deduceAtom does not fix the type for nil values
+	// nil is a wildcard and will accept whatever form the other operand takes
+	if w.rhs == nil {
+		errs = append(errs, handleAtom(alhs, w.typeRef, w)...)
+	} else if w.lhs == nil || alhs.Equals(&arhs) {
+		errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+	} else {
+		w2 := *w
+		errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...)
+		errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+	}
+
+	if !w.inLeaf && w.postItemHook != nil {
+		w.postItemHook(w)
+	}
+	return errs.WithLazyPrefix(prefixFn)
+}
+
+// doLeaf should be called on leaves before descending into children, if there
+// will be a descent. It modifies w.inLeaf.
+func (w *mergingWalker) doLeaf() {
+	if w.inLeaf {
+		// We're in a "big leaf", an atomic map or list. Ignore
+		// subsequent leaves.
+		return
+	}
+	w.inLeaf = true
+
+	// We don't recurse into leaf fields for merging.
+	w.rule(w)
+}
+
+func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors {
+	// Make sure at least one side is a valid scalar.
+	lerrs := validateScalar(t, w.lhs, "lhs: ")
+	rerrs := validateScalar(t, w.rhs, "rhs: ")
+	if len(lerrs) > 0 && len(rerrs) > 0 {
+		return append(lerrs, rerrs...)
+	}
+
+	// All scalars are leaf fields.
+	w.doLeaf()
+
+	return nil
+}
+
+func (w *mergingWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *mergingWalker {
+	if w.spareWalkers == nil {
+		// first descent.
+		w.spareWalkers = &[]*mergingWalker{}
+	}
+	var w2 *mergingWalker
+	if n := len(*w.spareWalkers); n > 0 {
+		w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1]
+	} else {
+		w2 = &mergingWalker{}
+	}
+	*w2 = *w
+	w2.typeRef = tr
+	w2.path = append(w2.path, pe)
+	w2.lhs = nil
+	w2.rhs = nil
+	w2.out = nil
+	return w2
+}
+
+func (w *mergingWalker) finishDescent(w2 *mergingWalker) {
+	// if the descent caused a realloc, ensure that we reuse the buffer
+	// for the next sibling.
+	w.path = w2.path[:len(w2.path)-1]
+	*w.spareWalkers = append(*w.spareWalkers, w2)
+}
+
+func (w *mergingWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) {
+	if v == nil {
+		return nil, nil
+	}
+	m, err := mapValue(w.allocator, v)
+	if err != nil {
+		return nil, errorf("%v: %v", prefix, err)
+	}
+	return m, nil
+}
+
+func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) {
+	rLen := 0
+	if rhs != nil {
+		rLen = rhs.Length()
+	}
+	lLen := 0
+	if lhs != nil {
+		lLen = lhs.Length()
+	}
+	outLen := lLen
+	if outLen < rLen {
+		outLen = rLen
+	}
+	out := make([]interface{}, 0, outLen)
+
+	rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false)
+	errs = append(errs, rhsErrs...)
+	lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true)
+	errs = append(errs, lhsErrs...)
+
+	if len(errs) != 0 {
+		return errs
+	}
+
+	sharedOrder := make([]*fieldpath.PathElement, 0, rLen)
+	for i := range rhsPEs {
+		pe := &rhsPEs[i]
+		if _, ok := observedLHS.Get(*pe); ok {
+			sharedOrder = append(sharedOrder, pe)
+		}
+	}
+
+	var nextShared *fieldpath.PathElement
+	if len(sharedOrder) > 0 {
+		nextShared = sharedOrder[0]
+		sharedOrder = sharedOrder[1:]
+	}
+
+	mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs))
+	lLen, rLen = len(lhsPEs), len(rhsPEs)
+	for lI, rI := 0, 0; lI < lLen || rI < rLen; {
+		if lI < lLen && rI < rLen {
+			pe := lhsPEs[lI]
+			if pe.Equals(rhsPEs[rI]) {
+				// merge LHS & RHS items
+				mergedRHS.Insert(pe, struct{}{})
+				lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicated.
+				rChild, _ := observedRHS.Get(pe)
+				mergeOut, itemErrs := w.mergeListItem(t, pe, lChild, rChild)
+				errs = append(errs, itemErrs...)
+				if mergeOut != nil {
+					out = append(out, *mergeOut)
+				}
+				lI++
+				rI++
+
+				nextShared = nil
+				if len(sharedOrder) > 0 {
+					nextShared = sharedOrder[0]
+					sharedOrder = sharedOrder[1:]
+				}
+				continue
+			}
+			if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) {
+				// shared item, but not the one we want in this round
+				lI++
+				continue
+			}
+		}
+		if lI < lLen {
+			pe := lhsPEs[lI]
+			if _, ok := observedRHS.Get(pe); !ok {
+				// take LHS item using At to make sure we get the right item (observed may not contain the right item).
+				lChild := lhs.AtUsing(w.allocator, lI)
+				mergeOut, itemErrs := w.mergeListItem(t, pe, lChild, nil)
+				errs = append(errs, itemErrs...)
+				if mergeOut != nil {
+					out = append(out, *mergeOut)
+				}
+				lI++
+				continue
+			} else if _, ok := mergedRHS.Get(pe); ok {
+				// we've already merged it with RHS, we don't want to duplicate it, skip it.
+				lI++
+			}
+		}
+		if rI < rLen {
+			// Take the RHS item, merge with matching LHS item if possible
+			pe := rhsPEs[rI]
+			mergedRHS.Insert(pe, struct{}{})
+			lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicated.
+			rChild, _ := observedRHS.Get(pe)
+			mergeOut, itemErrs := w.mergeListItem(t, pe, lChild, rChild)
+			errs = append(errs, itemErrs...)
+			if mergeOut != nil {
+				out = append(out, *mergeOut)
+			}
+			rI++
+			// Advance nextShared, if we are merging nextShared.
+			if nextShared != nil && nextShared.Equals(pe) {
+				nextShared = nil
+				if len(sharedOrder) > 0 {
+					nextShared = sharedOrder[0]
+					sharedOrder = sharedOrder[1:]
+				}
+			}
+		}
+	}
+
+	if len(out) > 0 {
+		i := interface{}(out)
+		w.out = &i
+	}
+
+	return errs
+}
+
+func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
+	var errs ValidationErrors
+	length := 0
+	if list != nil {
+		length = list.Length()
+	}
+	observed := fieldpath.MakePathElementValueMap(length)
+	pes := make([]fieldpath.PathElement, 0, length)
+	for i := 0; i < length; i++ {
+		child := list.At(i)
+		pe, err := listItemToPathElement(w.allocator, w.schema, t, child)
+		if err != nil {
+			errs = append(errs, errorf("element %v: %v", i, err.Error())...)
+			// If we can't construct the path element, we can't
+			// even report errors deeper in the schema, so bail on
+			// this element.
+			continue
+		}
+		if _, found := observed.Get(pe); found && !allowDuplicates {
+			errs = append(errs, errorf("duplicate entries for key %v", pe.String())...)
+			continue
+		} else if !found {
+			observed.Insert(pe, child)
+		} else {
+			// Duplicated items are not merged with the new value, make them nil.
+			observed.Insert(pe, value.NewValueInterface(nil))
+		}
+		pes = append(pes, pe)
+	}
+	return pes, observed, errs
+}
+
+func (w *mergingWalker) mergeListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) (out *interface{}, errs ValidationErrors) {
+	w2 := w.prepareDescent(pe, t.ElementType)
+	w2.lhs = lChild
+	w2.rhs = rChild
+	errs = append(errs, w2.merge(pe.String)...)
+	if w2.out != nil {
+		out = w2.out
+	}
+	w.finishDescent(w2)
+	return
+}
+
+func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) {
+	if v == nil {
+		return nil, nil
+	}
+	l, err := listValue(w.allocator, v)
+	if err != nil {
+		return nil, errorf("%v: %v", prefix, err)
+	}
+	return l, nil
+}
+
+func (w *mergingWalker) doList(t *schema.List) (errs ValidationErrors) {
+	lhs, _ := w.derefList("lhs: ", w.lhs)
+	if lhs != nil {
+		defer w.allocator.Free(lhs)
+	}
+	rhs, _ := w.derefList("rhs: ", w.rhs)
+	if rhs != nil {
+		defer w.allocator.Free(rhs)
+	}
+
+	// If both lhs and rhs are empty/null, treat it as a
+	// leaf: this helps preserve the empty/null
+	// distinction.
+ emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *mergingWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out[key] = *w2.out + } + w.finishDescent(w2) + return errs +} + +func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + if len(out) > 0 { + i := interface{}(out) + w.out = &i + } + + return errs +} + +func (w *mergingWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/parser.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/parser.go new file mode 100644 index 00000000000..c46e69f21f8 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/parser.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + + yaml "go.yaml.in/yaml/v2" + "sigs.k8s.io/structured-merge-diff/v6/schema" + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +// YAMLObject is an object encoded in YAML. +type YAMLObject string + +// Parser implements YAMLParser and allows introspecting the schema. +type Parser struct { + Schema schema.Schema +} + +// create builds an unvalidated parser. 
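+// The schema is unmarshalled as-is and is not checked against
+// SchemaSchemaYAML; NewParser performs that validation.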
+func create(s YAMLObject) (*Parser, error) {
+	p := Parser{}
+	err := yaml.Unmarshal([]byte(s), &p.Schema)
+	return &p, err
+}
+
+func createOrDie(schema YAMLObject) *Parser {
+	p, err := create(schema)
+	if err != nil {
+		panic(fmt.Errorf("failed to create parser: %v", err))
+	}
+	return p
+}
+
+var ssParser = createOrDie(YAMLObject(schema.SchemaSchemaYAML))
+
+// NewParser will build a YAMLParser from a schema. The schema is validated.
+func NewParser(schema YAMLObject) (*Parser, error) {
+	_, err := ssParser.Type("schema").FromYAML(schema)
+	if err != nil {
+		return nil, fmt.Errorf("unable to validate schema: %v", err)
+	}
+	p, err := create(schema)
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// TypeNames returns a list of types this parser understands.
+func (p *Parser) TypeNames() (names []string) {
+	for _, td := range p.Schema.Types {
+		names = append(names, td.Name)
+	}
+	return names
+}
+
+// Type returns a helper which can produce objects of the given type. Any
+// errors are deferred until a further function is called.
+func (p *Parser) Type(name string) ParseableType {
+	return ParseableType{
+		Schema:  &p.Schema,
+		TypeRef: schema.TypeRef{NamedType: &name},
+	}
+}
+
+// ParseableType allows for easy production of typed objects.
+type ParseableType struct {
+	TypeRef schema.TypeRef
+	Schema  *schema.Schema
+}
+
+// IsValid returns true if p's schema and typename are valid.
+func (p ParseableType) IsValid() bool {
+	_, ok := p.Schema.Resolve(p.TypeRef)
+	return ok
+}
+
+// FromYAML parses a yaml string into an object with the current schema
+// and the type "typename", returning an error if validation fails.
+func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) {
+	var v interface{}
+	err := yaml.Unmarshal([]byte(object), &v)
+	if err != nil {
+		return nil, err
+	}
+	return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...)
+}
+
+// FromUnstructured converts a go "interface{}" type, typically an
+// unstructured object in the Kubernetes world, to a TypedValue. It returns an
+// error if the resulting object fails schema validation.
+// The provided interface{} must be one of: map[string]interface{},
+// map[interface{}]interface{}, []interface{}, int types, float types,
+// string or boolean. Nested interface{} must also be one of these types.
+func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) {
+	return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...)
+}
+
+// FromStructured converts a go "interface{}" type, typically a structured object in
+// Kubernetes, to a TypedValue. It will return an error if the resulting object fails
+// schema validation. The provided "interface{}" value must be a pointer so that the
+// value can be modified via reflection. The provided "interface{}" may contain structs
+// and types that are converted to Values by the jsonMarshaler interface.
+func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) {
+	v, err := value.NewValueReflect(in)
+	if err != nil {
+		return nil, fmt.Errorf("error creating struct value reflector: %v", err)
+	}
+	return AsTyped(v, p.Schema, p.TypeRef, opts...)
+}
+
+// DeducedParseableType is a ParseableType that deduces the type from
+// the content of the object.
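+//
+// A minimal usage sketch from a consuming package (the YAML content of
+// `doc` is assumed):
+//
+//	tv, err := typed.DeducedParseableType.FromYAML(typed.YAMLObject(doc))
+//	if err != nil {
+//		// handle malformed input
+//	}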
+var DeducedParseableType ParseableType = createOrDie(YAMLObject(`types: +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`)).Type("__untyped_deduced_") diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/reconcile_schema.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/reconcile_schema.go new file mode 100644 index 00000000000..9b20e54aa0c --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/reconcile_schema.go @@ -0,0 +1,290 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "sync" + + "sigs.k8s.io/structured-merge-diff/v6/fieldpath" + "sigs.k8s.io/structured-merge-diff/v6/schema" +) + +var fmPool = sync.Pool{ + New: func() interface{} { return &reconcileWithSchemaWalker{} }, +} + +func (v *reconcileWithSchemaWalker) finished() { + v.fieldSet = nil + v.schema = nil + v.value = nil + v.typeRef = schema.TypeRef{} + v.path = nil + v.toRemove = nil + v.toAdd = nil + fmPool.Put(v) +} + +type reconcileWithSchemaWalker struct { + value *TypedValue // root of the live object + schema *schema.Schema // root of the live schema + + // state of node being visited by walker + fieldSet *fieldpath.Set + typeRef schema.TypeRef + path fieldpath.Path + isAtomic bool + + // the accumulated diff to perform to apply reconciliation + toRemove *fieldpath.Set // paths to remove recursively + toAdd *fieldpath.Set // paths to add after any removals + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*reconcileWithSchemaWalker +} + +func (v *reconcileWithSchemaWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *reconcileWithSchemaWalker { + if v.spareWalkers == nil { + // first descent. 
+		v.spareWalkers = &[]*reconcileWithSchemaWalker{}
+	}
+	var v2 *reconcileWithSchemaWalker
+	if n := len(*v.spareWalkers); n > 0 {
+		v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1]
+	} else {
+		v2 = &reconcileWithSchemaWalker{}
+	}
+	*v2 = *v
+	v2.typeRef = tr
+	v2.path = append(v.path, pe)
+	v2.value = v.value
+	return v2
+}
+
+func (v *reconcileWithSchemaWalker) finishDescent(v2 *reconcileWithSchemaWalker) {
+	v2.fieldSet = nil
+	v2.schema = nil
+	v2.value = nil
+	v2.typeRef = schema.TypeRef{}
+	if cap(v2.path) < 20 { // recycle slices that do not have unexpectedly high capacity
+		v2.path = v2.path[:0]
+	} else {
+		v2.path = nil
+	}
+
+	// merge any accumulated changes into parent walker
+	if v2.toRemove != nil {
+		if v.toRemove == nil {
+			v.toRemove = v2.toRemove
+		} else {
+			v.toRemove = v.toRemove.Union(v2.toRemove)
+		}
+	}
+	if v2.toAdd != nil {
+		if v.toAdd == nil {
+			v.toAdd = v2.toAdd
+		} else {
+			v.toAdd = v.toAdd.Union(v2.toAdd)
+		}
+	}
+	v2.toRemove = nil
+	v2.toAdd = nil
+
+	// if the descent caused a realloc, ensure that we reuse the buffer
+	// for the next sibling.
+	*v.spareWalkers = append(*v.spareWalkers, v2)
+}
+
+// ReconcileFieldSetWithSchema reconciles a field set with any changes to the
+// object's schema since the field set was written. Returns the reconciled field set, or nil if
+// no changes were made to the field set.
+//
+// Supports:
+// - changing types from atomic to granular
+// - changing types from granular to atomic
+func ReconcileFieldSetWithSchema(fieldset *fieldpath.Set, tv *TypedValue) (*fieldpath.Set, error) {
+	v := fmPool.Get().(*reconcileWithSchemaWalker)
+	v.fieldSet = fieldset
+	v.value = tv
+
+	v.schema = tv.schema
+	v.typeRef = tv.typeRef
+
+	defer v.finished()
+	errs := v.reconcile()
+
+	if len(errs) > 0 {
+		return nil, fmt.Errorf("errors reconciling field set with schema: %s", errs.Error())
+	}
+
+	// If there are any accumulated changes, apply them
+	if v.toAdd != nil || v.toRemove != nil {
+		out := v.fieldSet
+		if v.toRemove != nil {
+			out = out.RecursiveDifference(v.toRemove)
+		}
+		if v.toAdd != nil {
+			out = out.Union(v.toAdd)
+		}
+		return out, nil
+	}
+	return nil, nil
+}
+
+func (v *reconcileWithSchemaWalker) reconcile() (errs ValidationErrors) {
+	a, ok := v.schema.Resolve(v.typeRef)
+	if !ok {
+		errs = append(errs, errorf("could not resolve %v", v.typeRef)...)
+		return
+	}
+	return handleAtom(a, v.typeRef, v)
+}
+
+func (v *reconcileWithSchemaWalker) doScalar(_ *schema.Scalar) (errs ValidationErrors) {
+	return errs
+}
+
+func (v *reconcileWithSchemaWalker) visitListItems(t *schema.List, element *fieldpath.Set) (errs ValidationErrors) {
+	handleElement := func(pe fieldpath.PathElement, isMember bool) {
+		var hasChildren bool
+		v2 := v.prepareDescent(pe, t.ElementType)
+		v2.fieldSet, hasChildren = element.Children.Get(pe)
+		v2.isAtomic = isMember && !hasChildren
+		errs = append(errs, v2.reconcile()...)
+		v.finishDescent(v2)
+	}
+	element.Children.Iterate(func(pe fieldpath.PathElement) {
+		if element.Members.Has(pe) {
+			return
+		}
+		handleElement(pe, false)
+	})
+	element.Members.Iterate(func(pe fieldpath.PathElement) {
+		handleElement(pe, true)
+	})
+	return errs
+}
+
+func (v *reconcileWithSchemaWalker) doList(t *schema.List) (errs ValidationErrors) {
+	// reconcile lists changed from granular to atomic.
+	// Note that migrations from atomic to granular are not recommended and will
+	// be treated as if they were always granular.
+	//
+	// In this case, the manager that owned the previously atomic field (and all subfields),
+	// will now own just the top-level field and none of the subfields.
+	if !v.isAtomic && t.ElementRelationship == schema.Atomic {
+		v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields
+		v.toAdd = fieldpath.NewSet(v.path)    // add the root of the atomic
+		return errs
+	}
+	if v.fieldSet != nil {
+		errs = v.visitListItems(t, v.fieldSet)
+	}
+	return errs
+}
+
+func (v *reconcileWithSchemaWalker) visitMapItems(t *schema.Map, element *fieldpath.Set) (errs ValidationErrors) {
+	handleElement := func(pe fieldpath.PathElement, isMember bool) {
+		var hasChildren bool
+		if tr, ok := typeRefAtPath(t, pe); ok { // ignore fields not in the schema
+			v2 := v.prepareDescent(pe, tr)
+			v2.fieldSet, hasChildren = element.Children.Get(pe)
+			v2.isAtomic = isMember && !hasChildren
+			errs = append(errs, v2.reconcile()...)
+			v.finishDescent(v2)
+		}
+	}
+	element.Children.Iterate(func(pe fieldpath.PathElement) {
+		if element.Members.Has(pe) {
+			return
+		}
+		handleElement(pe, false)
+	})
+	element.Members.Iterate(func(pe fieldpath.PathElement) {
+		handleElement(pe, true)
+	})
+
+	return errs
+}
+
+func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) {
+	// We don't currently reconcile deduced types (unstructured CRDs) or maps that contain only unknown
+	// fields since deduced types do not yet support atomic or granular tags.
+	if isUntypedDeducedMap(t) {
+		return errs
+	}
+
+	// reconcile maps and structs changed from granular to atomic.
+	// Note that migrations from atomic to granular are not recommended and will
+	// be treated as if they were always granular.
+	//
+	// In this case the manager that owned the previously atomic field (and all subfields),
+	// will now own just the top-level field and none of the subfields.
+	if !v.isAtomic && t.ElementRelationship == schema.Atomic {
+		if v.fieldSet != nil && v.fieldSet.Size() > 0 {
+			v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields
+			v.toAdd = fieldpath.NewSet(v.path)    // add the root of the atomic
+		}
+		return errs
+	}
+	if v.fieldSet != nil {
+		errs = v.visitMapItems(t, v.fieldSet)
+	}
+	return errs
+}
+
+func fieldSetAtPath(node *fieldpath.Set, path fieldpath.Path) (*fieldpath.Set, bool) {
+	ok := true
+	for _, pe := range path {
+		if node, ok = node.Children.Get(pe); !ok {
+			break
+		}
+	}
+	return node, ok
+}
+
+func descendToPath(node *fieldpath.Set, path fieldpath.Path) *fieldpath.Set {
+	for _, pe := range path {
+		node = node.Children.Descend(pe)
+	}
+	return node
+}
+
+func typeRefAtPath(t *schema.Map, pe fieldpath.PathElement) (schema.TypeRef, bool) {
+	tr := t.ElementType
+	if pe.FieldName != nil {
+		if sf, ok := t.FindField(*pe.FieldName); ok {
+			tr = sf.Type
+		}
+	}
+	return tr, tr != schema.TypeRef{}
+}
+
+// isUntypedDeducedMap returns true if m has no fields defined, but allows untyped elements.
+// This is equivalent to an OpenAPI object that has x-kubernetes-preserve-unknown-fields=true
+// but does not have any properties defined on the object.
+func isUntypedDeducedMap(m *schema.Map) bool { + return isUntypedDeducedRef(m.ElementType) && m.Fields == nil +} + +func isUntypedDeducedRef(t schema.TypeRef) bool { + if t.NamedType != nil { + return *t.NamedType == "__untyped_deduced_" + } + atom := t.Inlined + return atom.Scalar != nil && *atom.Scalar == "untyped" +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go new file mode 100644 index 00000000000..86de5105d7c --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go @@ -0,0 +1,165 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "sigs.k8s.io/structured-merge-diff/v6/fieldpath" + "sigs.k8s.io/structured-merge-diff/v6/schema" + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +type removingWalker struct { + value value.Value + out interface{} + schema *schema.Schema + toRemove *fieldpath.Set + allocator value.Allocator + shouldExtract bool +} + +// removeItemsWithSchema will walk the given value and look for items from the toRemove set. +// Depending on whether shouldExtract is set true or false, it will return a modified version +// of the input value with either: +// 1. only the items in the toRemove set (when shouldExtract is true) or +// 2. the items from the toRemove set removed from the value (when shouldExtract is false). +func removeItemsWithSchema(val value.Value, toRemove *fieldpath.Set, schema *schema.Schema, typeRef schema.TypeRef, shouldExtract bool) value.Value { + w := &removingWalker{ + value: val, + schema: schema, + toRemove: toRemove, + allocator: value.NewFreelistAllocator(), + shouldExtract: shouldExtract, + } + resolveSchema(schema, typeRef, val, w) + return value.NewValueInterface(w.out) +} + +func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { + w.out = w.value.Unstructured() + return nil +} + +func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { + if !w.value.IsList() { + return nil + } + l := w.value.AsListUsing(w.allocator) + defer w.allocator.Free(l) + // If list is null or empty just return + if l == nil || l.Length() == 0 { + return nil + } + + // atomic lists should return everything in the case of extract + // and nothing in the case of remove (!w.shouldExtract) + if t.ElementRelationship == schema.Atomic { + if w.shouldExtract { + w.out = w.value.Unstructured() + } + return nil + } + + var newItems []interface{} + iter := l.RangeUsing(w.allocator) + defer w.allocator.Free(iter) + for iter.Next() { + _, item := iter.Item() + // Ignore error because we have already validated this list + pe, _ := listItemToPathElement(w.allocator, w.schema, t, item) + path, _ := fieldpath.MakePath(pe) + // save items on the path when we shouldExtract + // but ignore them when we are removing (i.e. 
!w.shouldExtract) + if w.toRemove.Has(path) { + if w.shouldExtract { + newItems = append(newItems, removeItemsWithSchema(item, w.toRemove, w.schema, t.ElementType, w.shouldExtract).Unstructured()) + } else { + continue + } + } + if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + item = removeItemsWithSchema(item, subset, w.schema, t.ElementType, w.shouldExtract) + } else { + // don't save items not on the path when we shouldExtract. + if w.shouldExtract { + continue + } + } + newItems = append(newItems, item.Unstructured()) + } + if len(newItems) > 0 { + w.out = newItems + } + return nil +} + +func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { + if !w.value.IsMap() { + return nil + } + m := w.value.AsMapUsing(w.allocator) + if m != nil { + defer w.allocator.Free(m) + } + // If map is null or empty just return + if m == nil || m.Empty() { + return nil + } + + // atomic maps should return everything in the case of extract + // and nothing in the case of remove (!w.shouldExtract) + if t.ElementRelationship == schema.Atomic { + if w.shouldExtract { + w.out = w.value.Unstructured() + } + return nil + } + + fieldTypes := map[string]schema.TypeRef{} + for _, structField := range t.Fields { + fieldTypes[structField.Name] = structField.Type + } + + newMap := map[string]interface{}{} + m.Iterate(func(k string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &k} + path, _ := fieldpath.MakePath(pe) + fieldType := t.ElementType + if ft, ok := fieldTypes[k]; ok { + fieldType = ft + } + // save values on the path when we shouldExtract + // but ignore them when we are removing (i.e. !w.shouldExtract) + if w.toRemove.Has(path) { + if w.shouldExtract { + newMap[k] = removeItemsWithSchema(val, w.toRemove, w.schema, fieldType, w.shouldExtract).Unstructured() + + } + return true + } + if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + val = removeItemsWithSchema(val, subset, w.schema, fieldType, w.shouldExtract) + } else { + // don't save values not on the path when we shouldExtract. + if w.shouldExtract { + return true + } + } + newMap[k] = val.Unstructured() + return true + }) + if len(newMap) > 0 { + w.out = newMap + } + return nil +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/tofieldset.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/tofieldset.go new file mode 100644 index 00000000000..a52e342e067 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/tofieldset.go @@ -0,0 +1,190 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package typed + +import ( + "sync" + + "sigs.k8s.io/structured-merge-diff/v6/fieldpath" + "sigs.k8s.io/structured-merge-diff/v6/schema" + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +var tPool = sync.Pool{ + New: func() interface{} { return &toFieldSetWalker{} }, +} + +func (tv TypedValue) toFieldSetWalker() *toFieldSetWalker { + v := tPool.Get().(*toFieldSetWalker) + v.value = tv.value + v.schema = tv.schema + v.typeRef = tv.typeRef + v.set = &fieldpath.Set{} + v.allocator = value.NewFreelistAllocator() + return v +} + +func (v *toFieldSetWalker) finished() { + v.schema = nil + v.typeRef = schema.TypeRef{} + v.path = nil + v.set = nil + tPool.Put(v) +} + +type toFieldSetWalker struct { + value value.Value + schema *schema.Schema + typeRef schema.TypeRef + + set *fieldpath.Set + path fieldpath.Path + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*toFieldSetWalker + allocator value.Allocator +} + +func (v *toFieldSetWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *toFieldSetWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*toFieldSetWalker{} + } + var v2 *toFieldSetWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &toFieldSetWalker{} + } + *v2 = *v + v2.typeRef = tr + v2.path = append(v2.path, pe) + return v2 +} + +func (v *toFieldSetWalker) finishDescent(v2 *toFieldSetWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + v.path = v2.path[:len(v2.path)-1] + *v.spareWalkers = append(*v.spareWalkers, v2) +} + +func (v *toFieldSetWalker) toFieldSet() ValidationErrors { + return resolveSchema(v.schema, v.typeRef, v.value, v) +} + +func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { + v.set.Insert(v.path) + + return nil +} + +func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + // Keeps track of the PEs we've seen + seen := fieldpath.MakePathElementSet(list.Length()) + // Keeps tracks of the PEs we've counted as duplicates + duplicates := fieldpath.MakePathElementSet(list.Length()) + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if seen.Has(pe) { + if duplicates.Has(pe) { + // do nothing + } else { + v.set.Insert(append(v.path, pe)) + duplicates.Insert(pe) + } + } else { + seen.Insert(pe) + } + } + + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if duplicates.Has(pe) { + continue + } + v2 := v.prepareDescent(pe, t.ElementType) + v2.value = child + errs = append(errs, v2.toFieldSet()...) 
+ + v2.set.Insert(v2.path) + v.finishDescent(v2) + } + return errs +} + +func (v *toFieldSetWalker) doList(t *schema.List) (errs ValidationErrors) { + list, _ := listValue(v.allocator, v.value) + if list != nil { + defer v.allocator.Free(list) + } + if t.ElementRelationship == schema.Atomic { + v.set.Insert(v.path) + return nil + } + + if list == nil { + return nil + } + + errs = v.visitListItems(t, list) + + return errs +} + +func (v *toFieldSetWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) { + m.Iterate(func(key string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &key} + + tr := t.ElementType + if sf, ok := t.FindField(key); ok { + tr = sf.Type + } + v2 := v.prepareDescent(pe, tr) + v2.value = val + errs = append(errs, v2.toFieldSet()...) + if val.IsNull() || (val.IsMap() && val.AsMap().Length() == 0) { + v2.set.Insert(v2.path) + } else if _, ok := t.FindField(key); !ok { + v2.set.Insert(v2.path) + } + v.finishDescent(v2) + return true + }) + return errs +} + +func (v *toFieldSetWalker) doMap(t *schema.Map) (errs ValidationErrors) { + m, _ := mapValue(v.allocator, v.value) + if m != nil { + defer v.allocator.Free(m) + } + if t.ElementRelationship == schema.Atomic { + v.set.Insert(v.path) + return nil + } + + if m == nil { + return nil + } + + errs = v.visitMapItems(t, m) + + return errs +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/typed.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/typed.go new file mode 100644 index 00000000000..0f9968fd926 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/typed.go @@ -0,0 +1,294 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "sync" + + "sigs.k8s.io/structured-merge-diff/v6/fieldpath" + "sigs.k8s.io/structured-merge-diff/v6/schema" + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +// ValidationOptions is the list of all the options available when running the validation. +type ValidationOptions int + +const ( + // AllowDuplicates means that sets and associative lists can have duplicate similar items. + AllowDuplicates ValidationOptions = iota +) + +// extractItemsOptions is the options available when extracting items. +type extractItemsOptions struct { + appendKeyFields bool +} + +type ExtractItemsOption func(*extractItemsOptions) + +// WithAppendKeyFields configures ExtractItems to include key fields. +// It is exported for use in configuring ExtractItems. +func WithAppendKeyFields() ExtractItemsOption { + return func(opts *extractItemsOptions) { + opts.appendKeyFields = true + } +} + +// AsTyped accepts a value and a type and returns a TypedValue. 'v' must have +// type 'typeName' in the schema. An error is returned if the v doesn't conform +// to the schema. 
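// Illustrative sketch, not part of the vendored file: typical construction of
// a TypedValue in this package. The *schema.Schema argument and the type name
// "myType" are assumptions supplied by the caller; AsTyped validates the
// wrapped object against that named type and fails on any violation.
func exampleAsTyped(s *schema.Schema) (*TypedValue, error) {
	name := "myType" // assumption: a type defined in s
	obj := map[string]interface{}{"field": "value"}
	// Wrap the plain Go value, then validate it against the named type.
	return AsTyped(value.NewValueInterface(obj), s, schema.TypeRef{NamedType: &name})
}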
+func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { + tv := &TypedValue{ + value: v, + typeRef: typeRef, + schema: s, + } + if err := tv.Validate(opts...); err != nil { + return nil, err + } + return tv, nil +} + +// AsTypeUnvalidated is just like AsTyped, but doesn't validate that the type +// conforms to the schema, for cases where that has already been checked or +// where you're going to call a method that validates as a side-effect (like +// ToFieldSet). +// +// Deprecated: This function was initially created because validation +// was expensive. Now that this has been solved, objects should always +// be created as validated, using `AsTyped`. +func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { + tv := &TypedValue{ + value: v, + typeRef: typeRef, + schema: s, + } + return tv +} + +// TypedValue is a value of some specific type. +type TypedValue struct { + value value.Value + typeRef schema.TypeRef + schema *schema.Schema +} + +// TypeRef is the type of the value. +func (tv TypedValue) TypeRef() schema.TypeRef { + return tv.typeRef +} + +// AsValue removes the type from the TypedValue and only keeps the value. +func (tv TypedValue) AsValue() value.Value { + return tv.value +} + +// Schema gets the schema from the TypedValue. +func (tv TypedValue) Schema() *schema.Schema { + return tv.schema +} + +// Validate returns an error with a list of every spec violation. +func (tv TypedValue) Validate(opts ...ValidationOptions) error { + w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } + defer w.finished() + if errs := w.validate(nil); len(errs) != 0 { + return errs + } + return nil +} + +// ToFieldSet creates a set containing every leaf field and item mentioned, or +// validation errors, if any were encountered. +func (tv TypedValue) ToFieldSet() (*fieldpath.Set, error) { + w := tv.toFieldSetWalker() + defer w.finished() + if errs := w.toFieldSet(); len(errs) != 0 { + return nil, errs + } + return w.set, nil +} + +// Merge returns the result of merging tv and pso ("partially specified +// object") together. Of note: +// - No fields can be removed by this operation. +// - If both tv and pso specify a given leaf field, the result will keep pso's +// value. +// - Container typed elements will have their items ordered: +// 1. like tv, if pso doesn't change anything in the container +// 2. like pso, if pso does change something in the container. +// +// tv and pso must both be of the same type (their Schema and TypeRef must +// match), or an error will be returned. Validation errors will be returned if +// the objects don't conform to the schema. +func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { + return merge(&tv, pso, ruleKeepRHS, nil) +} + +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + +// Compare compares the two objects. See the comments on the `Comparison` +// struct for details on the return value. +// +// tv and rhs must both be of the same type (their Schema and TypeRef must +// match), or an error will be returned. Validation errors will be returned if +// the objects don't conform to the schema. 
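// Illustrative sketch, not part of the vendored file: diffing two versions of
// an object with Compare. Both TypedValues are assumed to share one schema and
// type; the returned Comparison carries Added, Modified and Removed field sets.
func exampleCompare(oldObj, newObj *TypedValue) (*fieldpath.Set, error) {
	c, err := oldObj.Compare(newObj)
	if err != nil {
		return nil, err
	}
	// Leaf fields whose values differ between oldObj and newObj.
	return c.Modified, nil
}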
+func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ + Removed: fieldpath.NewSet(), + Modified: fieldpath.NewSet(), + Added: fieldpath.NewSet(), + } + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() + } + + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil +} + +// RemoveItems removes each provided list or map item from the value. +func (tv TypedValue) RemoveItems(items *fieldpath.Set) *TypedValue { + tv.value = removeItemsWithSchema(tv.value, items, tv.schema, tv.typeRef, false) + return &tv +} + +// ExtractItems returns a value with only the provided list or map items extracted from the value. +func (tv TypedValue) ExtractItems(items *fieldpath.Set, opts ...ExtractItemsOption) *TypedValue { + options := &extractItemsOptions{} + for _, opt := range opts { + opt(options) + } + if options.appendKeyFields { + tvPathSet, err := tv.ToFieldSet() + if err == nil { + keyFieldPathSet := fieldpath.NewSet() + items.Iterate(func(path fieldpath.Path) { + if !tvPathSet.Has(path) { + return + } + for i, pe := range path { + if pe.Key == nil { + continue + } + for _, keyField := range *pe.Key { + keyName := keyField.Name + // Create a new slice with the same elements as path[:i+1], but set its capacity to len(path[:i+1]). + // This ensures that appending to keyFieldPath creates a new underlying array, avoiding accidental + // modification of the original slice (path). 
+ keyFieldPath := append(path[:i+1:i+1], fieldpath.PathElement{FieldName: &keyName}) + keyFieldPathSet.Insert(keyFieldPath) + } + } + }) + items = items.Union(keyFieldPathSet) + } + } + + tv.value = removeItemsWithSchema(tv.value, items, tv.schema, tv.typeRef, true) + return &tv +} + +func (tv TypedValue) Empty() *TypedValue { + tv.value = value.NewValueInterface(nil) + return &tv +} + +var mwPool = sync.Pool{ + New: func() interface{} { return &mergingWalker{} }, +} + +func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) { + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + mw := mwPool.Get().(*mergingWalker) + defer func() { + mw.lhs = nil + mw.rhs = nil + mw.schema = nil + mw.typeRef = schema.TypeRef{} + mw.rule = nil + mw.postItemHook = nil + mw.out = nil + mw.inLeaf = false + + mwPool.Put(mw) + }() + + mw.lhs = lhs.value + mw.rhs = rhs.value + mw.schema = lhs.schema + mw.typeRef = lhs.typeRef + mw.rule = rule + mw.postItemHook = postRule + if mw.allocator == nil { + mw.allocator = value.NewFreelistAllocator() + } + + errs := mw.merge(nil) + if len(errs) > 0 { + return nil, errs + } + + out := &TypedValue{ + schema: lhs.schema, + typeRef: lhs.typeRef, + } + if mw.out != nil { + out.value = value.NewValueInterface(*mw.out) + } + return out, nil +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/validate.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/validate.go new file mode 100644 index 00000000000..3371f87b974 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/validate.go @@ -0,0 +1,205 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "sync" + + "sigs.k8s.io/structured-merge-diff/v6/fieldpath" + "sigs.k8s.io/structured-merge-diff/v6/schema" + "sigs.k8s.io/structured-merge-diff/v6/value" +) + +var vPool = sync.Pool{ + New: func() interface{} { return &validatingObjectWalker{} }, +} + +func (tv TypedValue) walker() *validatingObjectWalker { + v := vPool.Get().(*validatingObjectWalker) + v.value = tv.value + v.schema = tv.schema + v.typeRef = tv.typeRef + v.allowDuplicates = false + if v.allocator == nil { + v.allocator = value.NewFreelistAllocator() + } + return v +} + +func (v *validatingObjectWalker) finished() { + v.schema = nil + v.typeRef = schema.TypeRef{} + vPool.Put(v) +} + +type validatingObjectWalker struct { + value value.Value + schema *schema.Schema + typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool + + // Allocate only as many walkers as needed for the depth by storing them here. 
+ spareWalkers *[]*validatingObjectWalker + allocator value.Allocator +} + +func (v *validatingObjectWalker) prepareDescent(tr schema.TypeRef) *validatingObjectWalker { + if v.spareWalkers == nil { + // first descent. + v.spareWalkers = &[]*validatingObjectWalker{} + } + var v2 *validatingObjectWalker + if n := len(*v.spareWalkers); n > 0 { + v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1] + } else { + v2 = &validatingObjectWalker{} + } + *v2 = *v + v2.typeRef = tr + return v2 +} + +func (v *validatingObjectWalker) finishDescent(v2 *validatingObjectWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + *v.spareWalkers = append(*v.spareWalkers, v2) +} + +func (v *validatingObjectWalker) validate(prefixFn func() string) ValidationErrors { + return resolveSchema(v.schema, v.typeRef, v.value, v).WithLazyPrefix(prefixFn) +} + +func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs ValidationErrors) { + if v == nil { + return nil + } + if v.IsNull() { + return nil + } + switch *t { + case schema.Numeric: + if !v.IsFloat() && !v.IsInt() { + // TODO: should the schema separate int and float? + return errorf("%vexpected numeric (int or float), got %T", prefix, v.Unstructured()) + } + case schema.String: + if !v.IsString() { + return errorf("%vexpected string, got %#v", prefix, v) + } + case schema.Boolean: + if !v.IsBool() { + return errorf("%vexpected boolean, got %v", prefix, v) + } + case schema.Untyped: + if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() { + return errorf("%vexpected any scalar, got %v", prefix, v) + } + default: + return errorf("%vunexpected scalar type in schema: %v", prefix, *t) + } + return nil +} + +func (v *validatingObjectWalker) doScalar(t *schema.Scalar) ValidationErrors { + if errs := validateScalar(t, v.value, ""); len(errs) > 0 { + return errs + } + return nil +} + +func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + observedKeys := fieldpath.MakePathElementSet(list.Length()) + for i := 0; i < list.Length(); i++ { + child := list.AtUsing(v.allocator, i) + defer v.allocator.Free(child) + var pe fieldpath.PathElement + if t.ElementRelationship != schema.Associative { + pe.Index = &i + } else { + var err error + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + return + } + if observedKeys.Has(pe) && !v.allowDuplicates { + errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) + } + observedKeys.Insert(pe) + } + v2 := v.prepareDescent(t.ElementType) + v2.value = child + errs = append(errs, v2.validate(pe.String)...) 
+ v.finishDescent(v2) + } + return errs +} + +func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) { + list, err := listValue(v.allocator, v.value) + if err != nil { + return errorf("%v", err) + } + + if list == nil { + return nil + } + + defer v.allocator.Free(list) + errs = v.visitListItems(t, list) + + return errs +} + +func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) { + m.IterateUsing(v.allocator, func(key string, val value.Value) bool { + pe := fieldpath.PathElement{FieldName: &key} + tr := t.ElementType + if sf, ok := t.FindField(key); ok { + tr = sf.Type + } else if (t.ElementType == schema.TypeRef{}) { + errs = append(errs, errorf("field not declared in schema").WithPrefix(pe.String())...) + return false + } + v2 := v.prepareDescent(tr) + v2.value = val + // Giving pe.String as a parameter actually increases the allocations. + errs = append(errs, v2.validate(func() string { return pe.String() })...) + v.finishDescent(v2) + return true + }) + return errs +} + +func (v *validatingObjectWalker) doMap(t *schema.Map) (errs ValidationErrors) { + m, err := mapValue(v.allocator, v.value) + if err != nil { + return errorf("%v", err) + } + if m == nil { + return nil + } + defer v.allocator.Free(m) + errs = v.visitMapItems(t, m) + + return errs +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/allocator.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/allocator.go new file mode 100644 index 00000000000..f70cd41674b --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/allocator.go @@ -0,0 +1,203 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// Allocator provides a value object allocation strategy. +// Value objects can be allocated by passing an allocator to the "Using" +// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...). +// Value objects returned from "Using" functions should be given back to the allocator +// once longer needed by calling Allocator.Free(Value). +type Allocator interface { + // Free gives the allocator back any value objects returned by the "Using" + // receiver functions on the value interfaces. + // interface{} may be any of: Value, Map, List or Range. + Free(interface{}) + + // The unexported functions are for "Using" receiver functions of the value types + // to request what they need from the allocator. + allocValueUnstructured() *valueUnstructured + allocListUnstructuredRange() *listUnstructuredRange + allocValueReflect() *valueReflect + allocMapReflect() *mapReflect + allocStructReflect() *structReflect + allocListReflect() *listReflect + allocListReflectRange() *listReflectRange +} + +// HeapAllocator simply allocates objects to the heap. 
It is the default
+// allocator used by receiver functions on the value interfaces that do not
+// accept an allocator, and should be used whenever allocating objects that
+// will not be given back to an allocator by calling Allocator.Free(Value).
+var HeapAllocator = &heapAllocator{}
+
+type heapAllocator struct{}
+
+func (p *heapAllocator) allocValueUnstructured() *valueUnstructured {
+	return &valueUnstructured{}
+}
+
+func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange {
+	return &listUnstructuredRange{vv: &valueUnstructured{}}
+}
+
+func (p *heapAllocator) allocValueReflect() *valueReflect {
+	return &valueReflect{}
+}
+
+func (p *heapAllocator) allocStructReflect() *structReflect {
+	return &structReflect{}
+}
+
+func (p *heapAllocator) allocMapReflect() *mapReflect {
+	return &mapReflect{}
+}
+
+func (p *heapAllocator) allocListReflect() *listReflect {
+	return &listReflect{}
+}
+
+func (p *heapAllocator) allocListReflectRange() *listReflectRange {
+	return &listReflectRange{vr: &valueReflect{}}
+}
+
+func (p *heapAllocator) Free(_ interface{}) {}
+
+// NewFreelistAllocator creates a freelist-based allocator.
+// This allocator provides fast allocation and freeing of short-lived value objects.
+//
+// The freelists are bounded in size by freelistMaxSize. If more than this number of value objects are
+// allocated at once, the excess will be returned to the heap for garbage collection when freed.
+//
+// This allocator is unsafe and must not be accessed concurrently by goroutines.
+//
+// This allocator works well for traversal of value data trees. Typical usage is to acquire
+// a freelist at the beginning of the traversal and use it throughout
+// for all temporary value access.
+func NewFreelistAllocator() Allocator {
+	return &freelistAllocator{
+		valueUnstructured: &freelist{new: func() interface{} {
+			return &valueUnstructured{}
+		}},
+		listUnstructuredRange: &freelist{new: func() interface{} {
+			return &listUnstructuredRange{vv: &valueUnstructured{}}
+		}},
+		valueReflect: &freelist{new: func() interface{} {
+			return &valueReflect{}
+		}},
+		mapReflect: &freelist{new: func() interface{} {
+			return &mapReflect{}
+		}},
+		structReflect: &freelist{new: func() interface{} {
+			return &structReflect{}
+		}},
+		listReflect: &freelist{new: func() interface{} {
+			return &listReflect{}
+		}},
+		listReflectRange: &freelist{new: func() interface{} {
+			return &listReflectRange{vr: &valueReflect{}}
+		}},
+	}
+}
+
+// Bound memory usage of freelists. This prevents the processing of very large lists from leaking memory.
+// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects
+// that don't fit into the freelist are orphaned on the heap to be garbage collected.
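// Illustrative sketch, not part of the vendored file: the usage pattern the
// comments above describe. One freelist allocator serves a whole traversal;
// every "Using" variant is called with it, and each returned object is handed
// back with Free before the allocator goes out of scope.
func exampleFreelistTraversal(v Value) {
	a := NewFreelistAllocator()
	if !v.IsList() {
		return
	}
	list := v.AsListUsing(a)
	defer a.Free(list)
	r := list.RangeUsing(a)
	defer a.Free(r)
	for r.Next() {
		_, item := r.Item()
		_ = item // invalid after the next r.Next() or a.Free; do not retain
	}
}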
+const freelistMaxSize = 1000 + +type freelistAllocator struct { + valueUnstructured *freelist + listUnstructuredRange *freelist + valueReflect *freelist + mapReflect *freelist + structReflect *freelist + listReflect *freelist + listReflectRange *freelist +} + +type freelist struct { + list []interface{} + new func() interface{} +} + +func (f *freelist) allocate() interface{} { + var w2 interface{} + if n := len(f.list); n > 0 { + w2, f.list = f.list[n-1], f.list[:n-1] + } else { + w2 = f.new() + } + return w2 +} + +func (f *freelist) free(v interface{}) { + if len(f.list) < freelistMaxSize { + f.list = append(f.list, v) + } +} + +func (w *freelistAllocator) Free(value interface{}) { + switch v := value.(type) { + case *valueUnstructured: + v.Value = nil // don't hold references to unstructured objects + w.valueUnstructured.free(v) + case *listUnstructuredRange: + v.vv.Value = nil // don't hold references to unstructured objects + w.listUnstructuredRange.free(v) + case *valueReflect: + v.ParentMapKey = nil + v.ParentMap = nil + w.valueReflect.free(v) + case *mapReflect: + w.mapReflect.free(v) + case *structReflect: + w.structReflect.free(v) + case *listReflect: + w.listReflect.free(v) + case *listReflectRange: + v.vr.ParentMapKey = nil + v.vr.ParentMap = nil + w.listReflectRange.free(v) + } +} + +func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured { + return w.valueUnstructured.allocate().(*valueUnstructured) +} + +func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange { + return w.listUnstructuredRange.allocate().(*listUnstructuredRange) +} + +func (w *freelistAllocator) allocValueReflect() *valueReflect { + return w.valueReflect.allocate().(*valueReflect) +} + +func (w *freelistAllocator) allocStructReflect() *structReflect { + return w.structReflect.allocate().(*structReflect) +} + +func (w *freelistAllocator) allocMapReflect() *mapReflect { + return w.mapReflect.allocate().(*mapReflect) +} + +func (w *freelistAllocator) allocListReflect() *listReflect { + return w.listReflect.allocate().(*listReflect) +} + +func (w *freelistAllocator) allocListReflectRange() *listReflectRange { + return w.listReflectRange.allocate().(*listReflectRange) +} diff --git a/e2e/vendor/k8s.io/component-base/config/v1alpha1/doc.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/doc.go similarity index 67% rename from e2e/vendor/k8s.io/component-base/config/v1alpha1/doc.go rename to e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/doc.go index 7e4d6e8904e..84d7f0f3fc2 100644 --- a/e2e/vendor/k8s.io/component-base/config/v1alpha1/doc.go +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/component-base/config - -package v1alpha1 +// Package value defines types for an in-memory representation of yaml or json +// objects, organized for convenient comparison with a schema (as defined by +// the sibling schema package). Functions for reading and writing the objects +// are also provided. +package value diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/fields.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/fields.go new file mode 100644 index 00000000000..042b0487397 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/fields.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "sort" + "strings" +) + +// Field is an individual key-value pair. +type Field struct { + Name string + Value Value +} + +// FieldList is a list of key-value pairs. Each field is expected to +// have a different name. +type FieldList []Field + +// Copy returns a copy of the FieldList. +// Values are not copied. +func (f FieldList) Copy() FieldList { + c := make(FieldList, len(f)) + copy(c, f) + return c +} + +// Sort sorts the field list by Name. +func (f FieldList) Sort() { + if len(f) < 2 { + return + } + if len(f) == 2 { + if f[1].Name < f[0].Name { + f[0], f[1] = f[1], f[0] + } + return + } + sort.SliceStable(f, func(i, j int) bool { + return f[i].Name < f[j].Name + }) +} + +// Less compares two lists lexically. +func (f FieldList) Less(rhs FieldList) bool { + return f.Compare(rhs) == -1 +} + +// Compare compares two lists lexically. The result will be 0 if f==rhs, -1 +// if f < rhs, and +1 if f > rhs. +func (f FieldList) Compare(rhs FieldList) int { + i := 0 + for { + if i >= len(f) && i >= len(rhs) { + // Maps are the same length and all items are equal. + return 0 + } + if i >= len(f) { + // F is shorter. + return -1 + } + if i >= len(rhs) { + // RHS is shorter. + return 1 + } + if c := strings.Compare(f[i].Name, rhs[i].Name); c != 0 { + return c + } + if c := Compare(f[i].Value, rhs[i].Value); c != 0 { + return c + } + // The items are equal; continue. + i++ + } +} + +// Equals returns true if the two fieldslist are equals, false otherwise. +func (f FieldList) Equals(rhs FieldList) bool { + if len(f) != len(rhs) { + return false + } + for i := range f { + if f[i].Name != rhs[i].Name { + return false + } + if !Equals(f[i].Value, rhs[i].Value) { + return false + } + } + return true +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/jsontagutil.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/jsontagutil.go new file mode 100644 index 00000000000..3aadceb2226 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/jsontagutil.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "fmt" + "reflect" + "strings" +) + +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem() + +func reflectIsZero(dv reflect.Value) bool { + return dv.IsZero() +} + +// OmitZeroFunc returns a function for a type for a given struct field +// which determines if the value for that field is a zero value, matching +// how the stdlib JSON implementation. +func OmitZeroFunc(t reflect.Type) func(reflect.Value) bool { + // Provide a function that uses a type's IsZero method. + // This matches the go 1.24 custom IsZero() implementation matching + switch { + case t.Kind() == reflect.Interface && t.Implements(isZeroerType): + return func(v reflect.Value) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return safeIsNil(v) || + (v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) || + v.Interface().(isZeroer).IsZero() + } + case t.Kind() == reflect.Pointer && t.Implements(isZeroerType): + return func(v reflect.Value) bool { + // Avoid panics calling IsZero on nil pointer. + return safeIsNil(v) || v.Interface().(isZeroer).IsZero() + } + case t.Implements(isZeroerType): + return func(v reflect.Value) bool { + return v.Interface().(isZeroer).IsZero() + } + case reflect.PointerTo(t).Implements(isZeroerType): + return func(v reflect.Value) bool { + if !v.CanAddr() { + // Temporarily box v so we can take the address. + v2 := reflect.New(v.Type()).Elem() + v2.Set(v) + v = v2 + } + return v.Addr().Interface().(isZeroer).IsZero() + } + default: + // default to the reflect.IsZero implementation + return reflectIsZero + } +} + +// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236 +// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go + +func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool, omitzero func(reflect.Value) bool) { + tag := f.Tag.Get("json") + if tag == "-" { + return "", true, false, false, nil + } + name, opts := parseTag(tag) + if name == "" { + name = f.Name + } + + if opts.Contains("omitzero") { + omitzero = OmitZeroFunc(f.Type) + } + + return name, false, opts.Contains("inline"), opts.Contains("omitempty"), omitzero +} + +func isEmpty(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Chan, reflect.Func: + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + } + return false +} + +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
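// Illustrative sketch, not part of the vendored file: how parseTag and
// Contains together decompose a struct tag such as `json:"spec,omitempty"`.
func exampleParseTag() (string, bool) {
	name, opts := parseTag("spec,omitempty")
	return name, opts.Contains("omitempty") // "spec", true
}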
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/list.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/list.go new file mode 100644 index 00000000000..0748f18e82e --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/list.go @@ -0,0 +1,139 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// List represents a list object. +type List interface { + // Length returns how many items can be found in the map. + Length() int + // At returns the item at the given position in the map. It will + // panic if the index is out of range. + At(int) Value + // AtUsing uses the provided allocator and returns the item at the given + // position in the map. It will panic if the index is out of range. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + AtUsing(Allocator, int) Value + // Range returns a ListRange for iterating over the items in the list. + Range() ListRange + // RangeUsing uses the provided allocator and returns a ListRange for + // iterating over the items in the list. + // The returned Range should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + RangeUsing(Allocator) ListRange + // Equals compares the two lists, and return true if they are the same, false otherwise. + // Implementations can use ListEquals as a general implementation for this methods. + Equals(List) bool + // EqualsUsing uses the provided allocator and compares the two lists, and return true if + // they are the same, false otherwise. Implementations can use ListEqualsUsing as a general + // implementation for this methods. + EqualsUsing(Allocator, List) bool +} + +// ListRange represents a single iteration across the items of a list. +type ListRange interface { + // Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items. + Next() bool + // Item returns the index and value of the current item in the range. or panics if there is no current item. + // For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful avoid holding + // pointers to the value returned by Item() that escape the iteration loop since they become invalid once either + // Item() or Allocator.Free() is called. + Item() (index int, value Value) +} + +var EmptyRange = &emptyRange{} + +type emptyRange struct{} + +func (_ *emptyRange) Next() bool { + return false +} + +func (_ *emptyRange) Item() (index int, value Value) { + panic("Item called on empty ListRange") +} + +// ListEquals compares two lists lexically. 
+// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. +func ListEquals(lhs, rhs List) bool { + return ListEqualsUsing(HeapAllocator, lhs, rhs) +} + +// ListEqualsUsing uses the provided allocator and compares two lists lexically. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func ListEqualsUsing(a Allocator, lhs, rhs List) bool { + if lhs.Length() != rhs.Length() { + return false + } + + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + + for lhsRange.Next() && rhsRange.Next() { + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if !EqualsUsing(a, lv, rv) { + return false + } + } + return true +} + +// ListLess compares two lists lexically. +func ListLess(lhs, rhs List) bool { + return ListCompare(lhs, rhs) == -1 +} + +// ListCompare compares two lists lexically. The result will be 0 if l==rhs, -1 +// if l < rhs, and +1 if l > rhs. +func ListCompare(lhs, rhs List) int { + return ListCompareUsing(HeapAllocator, lhs, rhs) +} + +// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if l==rhs, -1 +// if l < rhs, and +1 if l > rhs. +func ListCompareUsing(a Allocator, lhs, rhs List) int { + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + + for { + lhsOk := lhsRange.Next() + rhsOk := rhsRange.Next() + if !lhsOk && !rhsOk { + // Lists are the same length and all items are equal. + return 0 + } + if !lhsOk { + // LHS is shorter. + return -1 + } + if !rhsOk { + // RHS is shorter. + return 1 + } + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if c := CompareUsing(a, lv, rv); c != 0 { + return c + } + // The items are equal; continue. + } +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/listreflect.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/listreflect.go new file mode 100644 index 00000000000..197d4c921d4 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/listreflect.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "reflect" +) + +type listReflect struct { + Value reflect.Value +} + +func (r listReflect) Length() int { + val := r.Value + return val.Len() +} + +func (r listReflect) At(i int) Value { + val := r.Value + return mustWrapValueReflect(val.Index(i), nil, nil) +} + +func (r listReflect) AtUsing(a Allocator, i int) Value { + val := r.Value + return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil) +} + +func (r listReflect) Unstructured() interface{} { + l := r.Length() + result := make([]interface{}, l) + for i := 0; i < l; i++ { + result[i] = r.At(i).Unstructured() + } + return result +} + +func (r listReflect) Range() ListRange { + return r.RangeUsing(HeapAllocator) +} + +func (r listReflect) RangeUsing(a Allocator) ListRange { + length := r.Value.Len() + if length == 0 { + return EmptyRange + } + rr := a.allocListReflectRange() + rr.list = r.Value + rr.i = -1 + rr.entry = TypeReflectEntryOf(r.Value.Type().Elem()) + return rr +} + +func (r listReflect) Equals(other List) bool { + return r.EqualsUsing(HeapAllocator, other) +} +func (r listReflect) EqualsUsing(a Allocator, other List) bool { + if otherReflectList, ok := other.(*listReflect); ok { + return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface()) + } + return ListEqualsUsing(a, &r, other) +} + +type listReflectRange struct { + list reflect.Value + vr *valueReflect + i int + entry *TypeReflectCacheEntry +} + +func (r *listReflectRange) Next() bool { + r.i += 1 + return r.i < r.list.Len() +} + +func (r *listReflectRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= r.list.Len() { + panic("Item() called on ListRange with no more items") + } + v := r.list.Index(r.i) + return r.i, r.vr.mustReuse(v, r.entry, nil, nil) +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/listunstructured.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/listunstructured.go new file mode 100644 index 00000000000..64cd8e7c0c4 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/listunstructured.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +type listUnstructured []interface{} + +func (l listUnstructured) Length() int { + return len(l) +} + +func (l listUnstructured) At(i int) Value { + return NewValueInterface(l[i]) +} + +func (l listUnstructured) AtUsing(a Allocator, i int) Value { + return a.allocValueUnstructured().reuse(l[i]) +} + +func (l listUnstructured) Equals(other List) bool { + return l.EqualsUsing(HeapAllocator, other) +} + +func (l listUnstructured) EqualsUsing(a Allocator, other List) bool { + return ListEqualsUsing(a, &l, other) +} + +func (l listUnstructured) Range() ListRange { + return l.RangeUsing(HeapAllocator) +} + +func (l listUnstructured) RangeUsing(a Allocator) ListRange { + if len(l) == 0 { + return EmptyRange + } + r := a.allocListUnstructuredRange() + r.list = l + r.i = -1 + return r +} + +type listUnstructuredRange struct { + list listUnstructured + vv *valueUnstructured + i int +} + +func (r *listUnstructuredRange) Next() bool { + r.i += 1 + return r.i < len(r.list) +} + +func (r *listUnstructuredRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= len(r.list) { + panic("Item() called on ListRange with no more items") + } + return r.i, r.vv.reuse(r.list[r.i]) +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/map.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/map.go new file mode 100644 index 00000000000..168b9fa082b --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/map.go @@ -0,0 +1,270 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "sort" +) + +// Map represents a Map or go structure. +type Map interface { + // Set changes or set the value of the given key. + Set(key string, val Value) + // Get returns the value for the given key, if present, or (nil, false) otherwise. + Get(key string) (Value, bool) + // GetUsing uses the provided allocator and returns the value for the given key, + // if present, or (nil, false) otherwise. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + GetUsing(a Allocator, key string) (Value, bool) + // Has returns true if the key is present, or false otherwise. + Has(key string) bool + // Delete removes the key from the map. + Delete(key string) + // Equals compares the two maps, and return true if they are the same, false otherwise. + // Implementations can use MapEquals as a general implementation for this methods. + Equals(other Map) bool + // EqualsUsing uses the provided allocator and compares the two maps, and return true if + // they are the same, false otherwise. Implementations can use MapEqualsUsing as a general + // implementation for this methods. + EqualsUsing(a Allocator, other Map) bool + // Iterate runs the given function for each key/value in the + // map. Returning false in the closure prematurely stops the + // iteration. 
+ Iterate(func(key string, value Value) bool) bool + // IterateUsing uses the provided allocator and runs the given function for each key/value + // in the map. Returning false in the closure prematurely stops the iteration. + IterateUsing(Allocator, func(key string, value Value) bool) bool + // Length returns the number of items in the map. + Length() int + // Empty returns true if the map is empty. + Empty() bool + // Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called + // with the values from both maps, otherwise it is called with the value of the map that contains the key and nil + // for the map that does not contain the key. Returning false in the closure prematurely stops the iteration. + Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool + // ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps + // contain a value for a given key, fn is called with the values from both maps, otherwise it is called with + // the value of the map that contains the key and nil for the map that does not contain the key. Returning + // false in the closure prematurely stops the iteration. + ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool +} + +// MapTraverseOrder defines the map traversal ordering available. +type MapTraverseOrder int + +const ( + // Unordered indicates that the map traversal has no ordering requirement. + Unordered = iota + // LexicalKeyOrder indicates that the map traversal is ordered by key, lexically. + LexicalKeyOrder +) + +// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called +// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil +// for the other map. Returning false in the closure prematurely stops the iteration. +func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return MapZipUsing(HeapAllocator, lhs, rhs, order, fn) +} + +// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps +// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with +// the value of the map that contains the key and nil for the other map. Returning false in the closure +// prematurely stops the iteration. +func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if lhs != nil { + return lhs.ZipUsing(a, rhs, order, fn) + } + if rhs != nil { + return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped + return fn(key, lhs, rhs) + }) + } + return true +} + +// defaultMapZip provides a default implementation of Zip for implementations that do not need to provide +// their own optimized implementation. 
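// Illustrative sketch, not part of the vendored file: using MapZip to find
// keys that only the right-hand map carries. A nil Value on one side of the
// callback is how callers detect additions and removals.
func exampleMapZip(lhs, rhs Map) []string {
	var onlyRHS []string
	MapZip(lhs, rhs, LexicalKeyOrder, func(key string, l, r Value) bool {
		if l == nil && r != nil {
			onlyRHS = append(onlyRHS, key)
		}
		return true // keep iterating
	})
	return onlyRHS
}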
+func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + switch order { + case Unordered: + return unorderedMapZip(a, lhs, rhs, fn) + case LexicalKeyOrder: + return lexicalKeyOrderedMapZip(a, lhs, rhs, fn) + default: + panic("Unsupported map order") + } +} + +func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) { + return true + } + + if lhs != nil { + ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool { + var rhsValue Value + if rhs != nil { + if item, ok := rhs.GetUsing(a, key); ok { + rhsValue = item + defer a.Free(rhsValue) + } + } + return fn(key, lhsValue, rhsValue) + }) + if !ok { + return false + } + } + if rhs != nil { + return rhs.IterateUsing(a, func(key string, rhsValue Value) bool { + if lhs == nil || !lhs.Has(key) { + return fn(key, nil, rhsValue) + } + return true + }) + } + return true +} + +func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + var lhsLength, rhsLength int + var orderedLength int // rough estimate of length of union of map keys + if lhs != nil { + lhsLength = lhs.Length() + orderedLength = lhsLength + } + if rhs != nil { + rhsLength = rhs.Length() + if rhsLength > orderedLength { + orderedLength = rhsLength + } + } + if lhsLength == 0 && rhsLength == 0 { + return true + } + + ordered := make([]string, 0, orderedLength) + if lhs != nil { + lhs.IterateUsing(a, func(key string, _ Value) bool { + ordered = append(ordered, key) + return true + }) + } + if rhs != nil { + rhs.IterateUsing(a, func(key string, _ Value) bool { + if lhs == nil || !lhs.Has(key) { + ordered = append(ordered, key) + } + return true + }) + } + sort.Strings(ordered) + for _, key := range ordered { + var litem, ritem Value + if lhs != nil { + litem, _ = lhs.GetUsing(a, key) + } + if rhs != nil { + ritem, _ = rhs.GetUsing(a, key) + } + ok := fn(key, litem, ritem) + if litem != nil { + a.Free(litem) + } + if ritem != nil { + a.Free(ritem) + } + if !ok { + return false + } + } + return true +} + +// MapLess compares two maps lexically. +func MapLess(lhs, rhs Map) bool { + return MapCompare(lhs, rhs) == -1 +} + +// MapCompare compares two maps lexically. +func MapCompare(lhs, rhs Map) int { + return MapCompareUsing(HeapAllocator, lhs, rhs) +} + +// MapCompareUsing uses the provided allocator and compares two maps lexically. +func MapCompareUsing(a Allocator, lhs, rhs Map) int { + c := 0 + var llength, rlength int + if lhs != nil { + llength = lhs.Length() + } + if rhs != nil { + rlength = rhs.Length() + } + if llength == 0 && rlength == 0 { + return 0 + } + i := 0 + MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool { + switch { + case i == llength: + c = -1 + case i == rlength: + c = 1 + case lhs == nil: + c = 1 + case rhs == nil: + c = -1 + default: + c = CompareUsing(a, lhs, rhs) + } + i++ + return c == 0 + }) + return c +} + +// MapEquals returns true if lhs == rhs, false otherwise. This function +// acts on generic types and should not be used by callers, but can help +// implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. +func MapEquals(lhs, rhs Map) bool { + return MapEqualsUsing(HeapAllocator, lhs, rhs) +} + +// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs, +// false otherwise. 
This function acts on generic types and should not be used +// by callers, but can help implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func MapEqualsUsing(a Allocator, lhs, rhs Map) bool { + if lhs == nil && rhs == nil { + return true + } + if lhs == nil || rhs == nil { + return false + } + if lhs.Length() != rhs.Length() { + return false + } + return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool { + if lhs == nil || rhs == nil { + return false + } + return EqualsUsing(a, lhs, rhs) + }) +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/mapreflect.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/mapreflect.go new file mode 100644 index 00000000000..c38402b99a3 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/mapreflect.go @@ -0,0 +1,209 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "reflect" +) + +type mapReflect struct { + valueReflect +} + +func (r mapReflect) Length() int { + val := r.Value + return val.Len() +} + +func (r mapReflect) Empty() bool { + val := r.Value + return val.Len() == 0 +} + +func (r mapReflect) Get(key string) (Value, bool) { + return r.GetUsing(HeapAllocator, key) +} + +func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) { + k, v, ok := r.get(key) + if !ok { + return nil, false + } + return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true +} + +func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) { + mapKey := r.toMapKey(k) + val := r.Value.MapIndex(mapKey) + return mapKey, val, val.IsValid() && val != reflect.Value{} +} + +func (r mapReflect) Has(key string) bool { + var val reflect.Value + val = r.Value.MapIndex(r.toMapKey(key)) + if !val.IsValid() { + return false + } + return val != reflect.Value{} +} + +func (r mapReflect) Set(key string, val Value) { + r.Value.SetMapIndex(r.toMapKey(key), reflect.ValueOf(val.Unstructured())) +} + +func (r mapReflect) Delete(key string) { + val := r.Value + val.SetMapIndex(r.toMapKey(key), reflect.Value{}) +} + +// TODO: Do we need to support types that implement json.Marshaler and are used as string keys? 
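// Illustrative sketch, not part of the vendored file: toMapKey is needed
// because Go map keys may be a named string type, so a plain string must be
// converted before an index lookup. resourceName here is a made-up type.
func exampleToMapKey() bool {
	type resourceName string
	m := map[resourceName]int{"cpu": 1}
	k := reflect.ValueOf("cpu").Convert(reflect.TypeOf(m).Key())
	return reflect.ValueOf(m).MapIndex(k).IsValid() // true: the lookup succeeded
}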
+func (r mapReflect) toMapKey(key string) reflect.Value { + val := r.Value + return reflect.ValueOf(key).Convert(val.Type().Key()) +} + +func (r mapReflect) Iterate(fn func(string, Value) bool) bool { + return r.IterateUsing(HeapAllocator, fn) +} + +func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + if r.Value.Len() == 0 { + return true + } + v := a.allocValueReflect() + defer a.Free(v) + return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool { + return fn(key.String(), v.mustReuse(value, e, &r.Value, &key)) + }) +} + +func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool { + iter := val.MapRange() + entry := TypeReflectEntryOf(val.Type().Elem()) + for iter.Next() { + next := iter.Value() + if !next.IsValid() { + continue + } + if !fn(entry, iter.Key(), next) { + return false + } + } + return true +} + +func (r mapReflect) Unstructured() interface{} { + result := make(map[string]interface{}, r.Length()) + r.Iterate(func(s string, value Value) bool { + result[s] = value.Unstructured() + return true + }) + return result +} + +func (r mapReflect) Equals(m Map) bool { + return r.EqualsUsing(HeapAllocator, m) +} + +func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { + lhsLength := r.Length() + rhsLength := m.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vr := a.allocValueReflect() + defer a.Free(vr) + entry := TypeReflectEntryOf(r.Value.Type().Elem()) + return m.Iterate(func(key string, value Value) bool { + _, lhsVal, ok := r.get(key) + if !ok { + return false + } + return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value) + }) +} + +func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered { + return r.unorderedReflectZip(a, otherMapReflect, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// unorderedReflectZip provides an optimized unordered zip for mapReflect types. 
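// Illustrative note, not part of the vendored file: relative to defaultMapZip,
// the fast path below walks rhs once with MapRange, records each emitted key
// in the visited set, and then sweeps lhs while skipping those keys. The small
// visited map replaces the per-key reflective probes of the other map that the
// generic implementation would otherwise perform on its second pass.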
+func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool { + if r.Empty() && (other == nil || other.Empty()) { + return true + } + + lhs := r.Value + lhsEntry := TypeReflectEntryOf(lhs.Type().Elem()) + + // map lookup via reflection is expensive enough that it is better to keep track of visited keys + visited := map[string]struct{}{} + + vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(vlhs) + defer a.Free(vrhs) + + if other != nil { + rhs := other.Value + rhsEntry := TypeReflectEntryOf(rhs.Type().Elem()) + iter := rhs.MapRange() + + for iter.Next() { + key := iter.Key() + keyString := key.String() + next := iter.Value() + if !next.IsValid() { + continue + } + rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key) + visited[keyString] = struct{}{} + var lhsVal Value + if _, v, ok := r.get(keyString); ok { + lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key) + } + if !fn(keyString, lhsVal, rhsVal) { + return false + } + } + } + + iter := lhs.MapRange() + for iter.Next() { + key := iter.Key() + if _, ok := visited[key.String()]; ok { + continue + } + next := iter.Value() + if !next.IsValid() { + continue + } + if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) { + return false + } + } + return true +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/mapunstructured.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/mapunstructured.go new file mode 100644 index 00000000000..c3ae00b180e --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/mapunstructured.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +type mapUnstructuredInterface map[interface{}]interface{} + +func (m mapUnstructuredInterface) Set(key string, val Value) { + m[key] = val.Unstructured() +} + +func (m mapUnstructuredInterface) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} + +func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) { + if v, ok := m[key]; !ok { + return nil, false + } else { + return a.allocValueUnstructured().reuse(v), true + } +} + +func (m mapUnstructuredInterface) Has(key string) bool { + _, ok := m[key] + return ok +} + +func (m mapUnstructuredInterface) Delete(key string) { + delete(m, key) +} + +func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + for k, v := range m { + if ks, ok := k.(string); !ok { + continue + } else { + if !fn(ks, vv.reuse(v)) { + return false + } + } + } + return true +} + +func (m mapUnstructuredInterface) Length() int { + return len(m) +} + +func (m mapUnstructuredInterface) Empty() bool { + return len(m) == 0 +} + +func (m mapUnstructuredInterface) Equals(other Map) bool { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { + lhsLength := m.Length() + rhsLength := other.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.IterateUsing(a, func(key string, value Value) bool { + lhsVal, ok := m[key] + if !ok { + return false + } + return EqualsUsing(a, vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) +} + +type mapUnstructuredString map[string]interface{} + +func (m mapUnstructuredString) Set(key string, val Value) { + m[key] = val.Unstructured() +} + +func (m mapUnstructuredString) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} +func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) { + if v, ok := m[key]; !ok { + return nil, false + } else { + return a.allocValueUnstructured().reuse(v), true + } +} + +func (m mapUnstructuredString) Has(key string) bool { + _, ok := m[key] + return ok +} + +func (m mapUnstructuredString) Delete(key string) { + delete(m, key) +} + +func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + for k, v := range m { + if !fn(k, vv.reuse(v)) { + return false + } + } + return true +} + +func (m mapUnstructuredString) Length() int { + return len(m) +} + +func (m mapUnstructuredString) Equals(other Map) bool { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { + lhsLength := 
m.Length() + rhsLength := other.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.IterateUsing(a, func(key string, value Value) bool { + lhsVal, ok := m[key] + if !ok { + return false + } + return EqualsUsing(a, vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) +} + +func (m mapUnstructuredString) Empty() bool { + return len(m) == 0 +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/reflectcache.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/reflectcache.go new file mode 100644 index 00000000000..3b4a402ee1a --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/reflectcache.go @@ -0,0 +1,492 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "sort" + "sync" + "sync/atomic" +) + +// UnstructuredConverter defines how a type can be converted directly to unstructured. +// Types that implement json.Marshaler may also optionally implement this interface to provide a more +// direct and more efficient conversion. All types that choose to implement this interface must still +// implement this same conversion via json.Marshaler. +type UnstructuredConverter interface { + json.Marshaler // require that json.Marshaler is implemented + + // ToUnstructured returns the unstructured representation. + ToUnstructured() interface{} +} + +// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured. +type TypeReflectCacheEntry struct { + isJsonMarshaler bool + ptrIsJsonMarshaler bool + isJsonUnmarshaler bool + ptrIsJsonUnmarshaler bool + isStringConvertable bool + ptrIsStringConvertable bool + + structFields map[string]*FieldCacheEntry + orderedStructFields []*FieldCacheEntry +} + +// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from +// unstructured. +type FieldCacheEntry struct { + // JsonName returns the name of the field according to the json tags on the struct field. + JsonName string + // isOmitEmpty is true if the field has the json 'omitempty' tag. + isOmitEmpty bool + // omitzero is set if the field has the json 'omitzero' tag. + omitzero func(reflect.Value) bool + // fieldPath is a list of field indices (see FieldByIndex) to lookup the value of + // a field in a reflect.Value struct. The field indices in the list form a path used + // to traverse through intermediary 'inline' fields. 
+ fieldPath [][]int + + fieldType reflect.Type + TypeEntry *TypeReflectCacheEntry +} + +func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { + if f.isOmitEmpty && (safeIsNil(fieldVal) || isEmpty(fieldVal)) { + return true + } + if f.omitzero != nil && f.omitzero(fieldVal) { + return true + } + return false +} + +// GetFrom returns the field identified by this FieldCacheEntry from the provided struct. +func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { + // field might be nested within 'inline' structs + for _, elem := range f.fieldPath { + structVal = dereference(structVal).FieldByIndex(elem) + } + return structVal +} + +var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() +var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem() +var defaultReflectCache = newReflectCache() + +// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type. +func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry { + cm := defaultReflectCache.get() + if record, ok := cm[t]; ok { + return record + } + updates := reflectCacheMap{} + result := typeReflectEntryOf(cm, t, updates) + if len(updates) > 0 { + defaultReflectCache.update(updates) + } + return result +} + +// TypeReflectEntryOf returns all updates needed to add provided reflect.Type, and the types its fields transitively +// depend on, to the cache. +func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry { + if record, ok := cm[t]; ok { + return record + } + if record, ok := updates[t]; ok { + return record + } + typeEntry := &TypeReflectCacheEntry{ + isJsonMarshaler: t.Implements(marshalerType), + ptrIsJsonMarshaler: reflect.PtrTo(t).Implements(marshalerType), + isJsonUnmarshaler: reflect.PtrTo(t).Implements(unmarshalerType), + isStringConvertable: t.Implements(unstructuredConvertableType), + ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType), + } + if t.Kind() == reflect.Struct { + fieldEntries := map[string]*FieldCacheEntry{} + buildStructCacheEntry(t, fieldEntries, nil) + typeEntry.structFields = fieldEntries + sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries)) + i := 0 + for _, entry := range fieldEntries { + sortedByJsonName[i] = entry + i++ + } + sort.Slice(sortedByJsonName, func(i, j int) bool { + return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName + }) + typeEntry.orderedStructFields = sortedByJsonName + } + + // cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving + // the field.typeEntry references, or creating them if they are not already in the cache + updates[t] = typeEntry + + for _, field := range typeEntry.structFields { + if field.TypeEntry == nil { + field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates) + } + } + return typeEntry +} + +func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) { + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + jsonName, omit, isInline, isOmitempty, omitzero := lookupJsonTags(field) + if omit { + continue + } + if isInline { + e := field.Type + if field.Type.Kind() == reflect.Ptr { + e = field.Type.Elem() + } + if e.Kind() == reflect.Struct { + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + } + continue + } + info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, omitzero: 
omitzero, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
+		infos[jsonName] = info
+	}
+}
+
+// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs.
+func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry {
+	return e.structFields
+}
+
+// OrderedFields returns the struct fields sorted by JSON field name, or nil for non-structs.
+func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry {
+	return e.orderedStructFields
+}
+
+// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured.
+func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool {
+	return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable
+}
+
+// ToUnstructured converts the provided value to unstructured and returns it.
+func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) {
+	// This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505
+	// and is intended to replace it.
+
+	// Check if the object is a nil pointer.
+	if sv.Kind() == reflect.Ptr && sv.IsNil() {
+		// We're done - we don't need to store anything.
+		return nil, nil
+	}
+	// Check if the object has a custom string converter and use it if available, since it is much more efficient
+	// than round tripping through json.
+	if converter, ok := e.getUnstructuredConverter(sv); ok {
+		return converter.ToUnstructured(), nil
+	}
+	// Check if the object has a custom JSON marshaller/unmarshaller.
+	if marshaler, ok := e.getJsonMarshaler(sv); ok {
+		data, err := marshaler.MarshalJSON()
+		if err != nil {
+			return nil, err
+		}
+		switch {
+		case len(data) == 0:
+			return nil, fmt.Errorf("error decoding from json: empty value")
+
+		case bytes.Equal(data, nullBytes):
+			// We're done - we don't need to store anything.
+			return nil, nil
+
+		case bytes.Equal(data, trueBytes):
+			return true, nil
+
+		case bytes.Equal(data, falseBytes):
+			return false, nil
+
+		case data[0] == '"':
+			var result string
+			err := unmarshal(data, &result)
+			if err != nil {
+				return nil, fmt.Errorf("error decoding string from json: %v", err)
+			}
+			return result, nil
+
+		case data[0] == '{':
+			result := make(map[string]interface{})
+			err := unmarshal(data, &result)
+			if err != nil {
+				return nil, fmt.Errorf("error decoding object from json: %v", err)
+			}
+			return result, nil
+
+		case data[0] == '[':
+			result := make([]interface{}, 0)
+			err := unmarshal(data, &result)
+			if err != nil {
+				return nil, fmt.Errorf("error decoding array from json: %v", err)
+			}
+			return result, nil
+
+		default:
+			var (
+				resultInt   int64
+				resultFloat float64
+				err         error
+			)
+			if err = unmarshal(data, &resultInt); err == nil {
+				return resultInt, nil
+			} else if err = unmarshal(data, &resultFloat); err == nil {
+				return resultFloat, nil
+			} else {
+				return nil, fmt.Errorf("error decoding number from json: %v", err)
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type())
+}
+
+// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured.
+func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool {
+	return e.isJsonUnmarshaler
+}
+
+// FromUnstructured converts the provided source value from unstructured into the provided destination value.
+func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error { + // TODO: this could be made much more efficient using direct conversions like + // UnstructuredConverter.ToUnstructured provides. + st := dv.Type() + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok { + return unmarshaler.UnmarshalJSON(data) + } + return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type()) +} + +var ( + nullBytes = []byte("null") + trueBytes = []byte("true") + falseBytes = []byte("false") +) + +func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) { + if e.isJsonMarshaler { + return v.Interface().(json.Marshaler), true + } + if e.ptrIsJsonMarshaler { + // Check pointer receivers if v is not a pointer + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + return v.Interface().(json.Marshaler), true + } + } + return nil, false +} + +func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) { + if !e.isJsonUnmarshaler { + return nil, false + } + return v.Addr().Interface().(json.Unmarshaler), true +} + +func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) { + if e.isStringConvertable { + return v.Interface().(UnstructuredConverter), true + } + if e.ptrIsStringConvertable { + // Check pointer receivers if v is not a pointer + if v.CanAddr() { + v = v.Addr() + return v.Interface().(UnstructuredConverter), true + } + } + return nil, false +} + +type typeReflectCache struct { + // use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any + // go program using this cache + value atomic.Value + // mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a + // read-lock since the atomic value is always read-only + mu sync.Mutex +} + +func newReflectCache() *typeReflectCache { + cache := &typeReflectCache{} + cache.value.Store(make(reflectCacheMap)) + return cache +} + +type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry + +// get returns the reflectCacheMap. +func (c *typeReflectCache) get() reflectCacheMap { + return c.value.Load().(reflectCacheMap) +} + +// update merges the provided updates into the cache. +func (c *typeReflectCache) update(updates reflectCacheMap) { + c.mu.Lock() + defer c.mu.Unlock() + + currentCacheMap := c.value.Load().(reflectCacheMap) + + hasNewEntries := false + for t := range updates { + if _, ok := currentCacheMap[t]; !ok { + hasNewEntries = true + break + } + } + if !hasNewEntries { + // Bail if the updates have been set while waiting for lock acquisition. + // This is safe since setting entries is idempotent. 
+		return
+	}
+
+	newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates))
+	for k, v := range currentCacheMap {
+		newCacheMap[k] = v
+	}
+	for t, update := range updates {
+		newCacheMap[t] = update
+	}
+	c.value.Store(newCacheMap)
+}
+
+// Below json Unmarshal is from k8s.io/apimachinery/pkg/util/json
+// to handle number conversions as expected by Kubernetes
+
+// limit recursive depth to prevent stack overflow errors
+const maxDepth = 10000
+
+// unmarshal unmarshals the given data.
+// If v is a *map[string]interface{}, numbers are converted to int64 or float64.
+func unmarshal(data []byte, v interface{}) error {
+	// Build a decoder from the given data
+	decoder := json.NewDecoder(bytes.NewBuffer(data))
+	// Preserve numbers, rather than casting to float64 automatically
+	decoder.UseNumber()
+	// Run the decode
+	if err := decoder.Decode(v); err != nil {
+		return err
+	}
+	next := decoder.InputOffset()
+	if _, err := decoder.Token(); !errors.Is(err, io.EOF) {
+		tail := bytes.TrimLeft(data[next:], " \t\r\n")
+		return fmt.Errorf("unexpected trailing data at offset %d", len(data)-len(tail))
+	}
+
+	// If the decode succeeds, post-process the object to convert json.Number objects to int64 or float64
+	switch v := v.(type) {
+	case *map[string]interface{}:
+		return convertMapNumbers(*v, 0)
+
+	case *[]interface{}:
+		return convertSliceNumbers(*v, 0)
+
+	case *interface{}:
+		return convertInterfaceNumbers(v, 0)
+
+	default:
+		return nil
+	}
+}
+
+func convertInterfaceNumbers(v *interface{}, depth int) error {
+	var err error
+	switch v2 := (*v).(type) {
+	case json.Number:
+		*v, err = convertNumber(v2)
+	case map[string]interface{}:
+		err = convertMapNumbers(v2, depth+1)
+	case []interface{}:
+		err = convertSliceNumbers(v2, depth+1)
+	}
+	return err
+}
+
+// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertMapNumbers(m map[string]interface{}, depth int) error {
+	if depth > maxDepth {
+		return fmt.Errorf("exceeded max depth of %d", maxDepth)
+	}
+
+	var err error
+	for k, v := range m {
+		switch v := v.(type) {
+		case json.Number:
+			m[k], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v, depth+1)
+		case []interface{}:
+			err = convertSliceNumbers(v, depth+1)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertSliceNumbers(s []interface{}, depth int) error {
+	if depth > maxDepth {
+		return fmt.Errorf("exceeded max depth of %d", maxDepth)
+	}
+
+	var err error
+	for i, v := range s {
+		switch v := v.(type) {
+		case json.Number:
+			s[i], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v, depth+1)
+		case []interface{}:
+			err = convertSliceNumbers(v, depth+1)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertNumber converts a json.Number to an int64 or float64, or returns an error
+func convertNumber(n json.Number) (interface{}, error) {
+	// Attempt to convert to an int64 first
+	if i, err := n.Int64(); err == nil {
+		return i, nil
+	}
+	// Return a float64 (default json.Decode() behavior)
+	// An overflow will return an error
+	return n.Float64()
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/scalar.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/scalar.go
new file mode 100644
index 00000000000..5824219e513
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/scalar.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+// FloatCompare compares floats. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs.
+func FloatCompare(lhs, rhs float64) int {
+	if lhs > rhs {
+		return 1
+	} else if lhs < rhs {
+		return -1
+	}
+	return 0
+}
+
+// IntCompare compares integers. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs.
+func IntCompare(lhs, rhs int64) int {
+	if lhs > rhs {
+		return 1
+	} else if lhs < rhs {
+		return -1
+	}
+	return 0
+}
+
+// BoolCompare compares booleans, ordering false before true. The result will
+// be 0 if lhs==rhs, -1 if lhs < rhs, and +1 if lhs > rhs.
+func BoolCompare(lhs, rhs bool) int {
+	if lhs == rhs {
+		return 0
+	} else if !lhs {
+		return -1
+	}
+	return 1
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/structreflect.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/structreflect.go
new file mode 100644
index 00000000000..4a7bb5c6ebd
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/structreflect.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package value + +import ( + "fmt" + "reflect" +) + +type structReflect struct { + valueReflect +} + +func (r structReflect) Length() int { + i := 0 + eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { + i++ + return true + }) + return i +} + +func (r structReflect) Empty() bool { + return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return false // exit early if the struct is non-empty + }) +} + +func (r structReflect) Get(key string) (Value, bool) { + return r.GetUsing(HeapAllocator, key) +} + +func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) { + if val, ok := r.findJsonNameField(key); ok { + return a.allocValueReflect().mustReuse(val, nil, nil, nil), true + } + return nil, false +} + +func (r structReflect) Has(key string) bool { + _, ok := r.findJsonNameField(key) + return ok +} + +func (r structReflect) Set(key string, val Value) { + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] + if !ok { + panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface())) + } + oldVal := fieldEntry.GetFrom(r.Value) + newVal := reflect.ValueOf(val.Unstructured()) + r.update(fieldEntry, key, oldVal, newVal) +} + +func (r structReflect) Delete(key string) { + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] + if !ok { + panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface())) + } + oldVal := fieldEntry.GetFrom(r.Value) + if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty { + panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface())) + } + r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type())) +} + +func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) { + if oldVal.CanSet() { + oldVal.Set(newVal) + return + } + + // map items are not addressable, so if a struct is contained in a map, the only way to modify it is + // to write a replacement fieldEntry into the map. + if r.ParentMap != nil { + if r.ParentMapKey == nil { + panic("ParentMapKey must not be nil if ParentMap is not nil") + } + replacement := reflect.New(r.Value.Type()).Elem() + fieldEntry.GetFrom(replacement).Set(newVal) + r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement) + return + } + + // This should never happen since NewValueReflect ensures that the root object reflected on is a pointer and map + // item replacement is handled above. 
+ panic(fmt.Sprintf("key %s may not be modified on struct: %T: struct is not settable", key, r.Value.Interface())) +} + +func (r structReflect) Iterate(fn func(string, Value) bool) bool { + return r.IterateUsing(HeapAllocator, fn) +} + +func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + vr := a.allocValueReflect() + defer a.Free(vr) + return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return fn(s, vr.mustReuse(value, e, nil, nil)) + }) +} + +func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool { + for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() { + fieldVal := fieldCacheEntry.GetFrom(structVal) + if fieldCacheEntry.CanOmit(fieldVal) { + // omit it + continue + } + ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal) + if !ok { + return false + } + } + return true +} + +func (r structReflect) Unstructured() interface{} { + // Use number of struct fields as a cheap way to rough estimate map size + result := make(map[string]interface{}, r.Value.NumField()) + r.Iterate(func(s string, value Value) bool { + result[s] = value.Unstructured() + return true + }) + return result +} + +func (r structReflect) Equals(m Map) bool { + return r.EqualsUsing(HeapAllocator, m) +} + +func (r structReflect) EqualsUsing(a Allocator, m Map) bool { + // MapEquals uses zip and is fairly efficient for structReflect + return MapEqualsUsing(a, &r, m) +} + +func (r structReflect) findJsonNameFieldAndNotEmpty(jsonName string) (reflect.Value, bool) { + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] + if !ok { + return reflect.Value{}, false + } + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) +} + +func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) { + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] + if !ok { + return reflect.Value{}, false + } + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) +} + +func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() { + lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(lhsvr) + defer a.Free(rhsvr) + return r.structZip(otherStruct, lhsvr, rhsvr, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// structZip provides an optimized zip for structReflect types. The zip is always lexical key ordered since there is +// no additional cost to ordering the zip for structured types. 
+func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool {
+	lhsVal := r.Value
+	rhsVal := other.Value
+
+	for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() {
+		lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal)
+		rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal)
+		lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal)
+		rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal)
+		if lhsOmit && rhsOmit {
+			continue
+		}
+		var lhsVal, rhsVal Value
+		if !lhsOmit {
+			lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
+		}
+		if !rhsOmit {
+			rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
+		}
+		if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/value.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/value.go
new file mode 100644
index 00000000000..140b99038e7
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/value.go
@@ -0,0 +1,352 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	jsoniter "github.com/json-iterator/go"
+
+	yaml "go.yaml.in/yaml/v2"
+)
+
+var (
+	readPool  = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
+	writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
+)
+
+// A Value corresponds to an 'atom' in the schema. It should return true
+// for at least one of the IsXXX methods below, or the value is
+// considered "invalid".
+type Value interface {
+	// IsMap returns true if the Value is a Map, false otherwise.
+	IsMap() bool
+	// IsList returns true if the Value is a List, false otherwise.
+	IsList() bool
+	// IsBool returns true if the Value is a bool, false otherwise.
+	IsBool() bool
+	// IsInt returns true if the Value is an int64, false otherwise.
+	IsInt() bool
+	// IsFloat returns true if the Value is a float64, false
+	// otherwise.
+	IsFloat() bool
+	// IsString returns true if the Value is a string, false
+	// otherwise.
+	IsString() bool
+	// IsNull returns true if the Value is null, false otherwise.
+	IsNull() bool
+
+	// AsMap converts the Value into a Map (or panic if the type
+	// doesn't allow it).
+	AsMap() Map
+	// AsMapUsing uses the provided allocator and converts the Value
+	// into a Map (or panic if the type doesn't allow it).
+	AsMapUsing(Allocator) Map
+	// AsList converts the Value into a List (or panic if the type
+	// doesn't allow it).
+	AsList() List
+	// AsListUsing uses the provided allocator and converts the Value
+	// into a List (or panic if the type doesn't allow it).
+	AsListUsing(Allocator) List
+	// AsBool converts the Value into a bool (or panic if the type
+	// doesn't allow it).
+	AsBool() bool
+	// AsInt converts the Value into an int64 (or panic if the type
+	// doesn't allow it).
+	AsInt() int64
+	// AsFloat converts the Value into a float64 (or panic if the type
+	// doesn't allow it).
+	AsFloat() float64
+	// AsString converts the Value into a string (or panic if the type
+	// doesn't allow it).
+	AsString() string
+
+	// Unstructured converts the Value into an Unstructured interface{}.
+	Unstructured() interface{}
+}
+
+// FromJSON is a helper function for reading a JSON document.
+func FromJSON(input []byte) (Value, error) {
+	return FromJSONFast(input)
+}
+
+// FromJSONFast is a helper function for reading a JSON document.
+func FromJSONFast(input []byte) (Value, error) {
+	iter := readPool.BorrowIterator(input)
+	defer readPool.ReturnIterator(iter)
+	return readJSONIter(iter)
+}
+
+// ToJSON is a helper function for producing a JSON document.
+func ToJSON(v Value) ([]byte, error) {
+	buf := bytes.Buffer{}
+	stream := writePool.BorrowStream(&buf)
+	defer writePool.ReturnStream(stream)
+	writeJSONStream(v, stream)
+	b := stream.Buffer()
+	err := stream.Flush()
+	// Help jsoniter manage its buffers--without this, the next
+	// use of the stream is likely to require an allocation. Look
+	// at the jsoniter stream code to understand why. They were probably
+	// optimizing for folks using the buffer directly.
+	stream.SetBuffer(b[:0])
+	return buf.Bytes(), err
+}
+
+// readJSONIter reads a Value from a JSON iterator.
+// DO NOT EXPORT
+// TODO: eliminate this https://github.com/kubernetes-sigs/structured-merge-diff/issues/202
+func readJSONIter(iter *jsoniter.Iterator) (Value, error) {
+	v := iter.Read()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return nil, iter.Error
+	}
+	return NewValueInterface(v), nil
+}
+
+// writeJSONStream writes a value into a JSON stream.
+// DO NOT EXPORT
+// TODO: eliminate this https://github.com/kubernetes-sigs/structured-merge-diff/issues/202
+func writeJSONStream(v Value, stream *jsoniter.Stream) {
+	stream.WriteVal(v.Unstructured())
+}
+
+// ToYAML marshals a value as YAML.
+func ToYAML(v Value) ([]byte, error) {
+	return yaml.Marshal(v.Unstructured())
+}
+
+// Equals returns true iff the two values are equal.
+func Equals(lhs, rhs Value) bool {
+	return EqualsUsing(HeapAllocator, lhs, rhs)
+}
+
+// EqualsUsing uses the provided allocator and returns true iff the two values are equal.
+func EqualsUsing(a Allocator, lhs, rhs Value) bool {
+	if lhs.IsFloat() || rhs.IsFloat() {
+		var lf float64
+		if lhs.IsFloat() {
+			lf = lhs.AsFloat()
+		} else if lhs.IsInt() {
+			lf = float64(lhs.AsInt())
+		} else {
+			return false
+		}
+		var rf float64
+		if rhs.IsFloat() {
+			rf = rhs.AsFloat()
+		} else if rhs.IsInt() {
+			rf = float64(rhs.AsInt())
+		} else {
+			return false
+		}
+		return lf == rf
+	}
+	if lhs.IsInt() {
+		if rhs.IsInt() {
+			return lhs.AsInt() == rhs.AsInt()
+		}
+		return false
+	} else if rhs.IsInt() {
+		return false
+	}
+	if lhs.IsString() {
+		if rhs.IsString() {
+			return lhs.AsString() == rhs.AsString()
+		}
+		return false
+	} else if rhs.IsString() {
+		return false
+	}
+	if lhs.IsBool() {
+		if rhs.IsBool() {
+			return lhs.AsBool() == rhs.AsBool()
+		}
+		return false
+	} else if rhs.IsBool() {
+		return false
+	}
+	if lhs.IsList() {
+		if rhs.IsList() {
+			lhsList := lhs.AsListUsing(a)
+			defer a.Free(lhsList)
+			rhsList := rhs.AsListUsing(a)
+			defer a.Free(rhsList)
+			return lhsList.EqualsUsing(a, rhsList)
+		}
+		return false
+	} else if rhs.IsList() {
+		return false
+	}
+	if lhs.IsMap() {
+		if rhs.IsMap() {
+			lhsList := lhs.AsMapUsing(a)
+			defer a.Free(lhsList)
+			rhsList := rhs.AsMapUsing(a)
+			defer a.Free(rhsList)
+			return lhsList.EqualsUsing(a, rhsList)
+		}
+		return false
+	} else if rhs.IsMap() {
+		return false
+	}
+	if lhs.IsNull() {
+		if rhs.IsNull() {
+			return true
+		}
+		return false
+	} else if rhs.IsNull() {
+		return false
+	}
+	// No field is set on either object.
+	return true
+}
+
+// ToString returns a human-readable representation of the value.
+func ToString(v Value) string {
+	if v.IsNull() {
+		return "null"
+	}
+	switch {
+	case v.IsFloat():
+		return fmt.Sprintf("%v", v.AsFloat())
+	case v.IsInt():
+		return fmt.Sprintf("%v", v.AsInt())
+	case v.IsString():
+		return fmt.Sprintf("%q", v.AsString())
+	case v.IsBool():
+		return fmt.Sprintf("%v", v.AsBool())
+	case v.IsList():
+		strs := []string{}
+		list := v.AsList()
+		for i := 0; i < list.Length(); i++ {
+			strs = append(strs, ToString(list.At(i)))
+		}
+		return "[" + strings.Join(strs, ",") + "]"
+	case v.IsMap():
+		strs := []string{}
+		v.AsMap().Iterate(func(k string, v Value) bool {
+			strs = append(strs, fmt.Sprintf("%v=%v", k, ToString(v)))
+			return true
+		})
+		return strings.Join(strs, "")
+	}
+	// No field is set; the value is invalid.
+	return "{{undefined}}"
+}
+
+// Less provides a total ordering for Value (so that they can be sorted, even
+// if they are of different types).
+func Less(lhs, rhs Value) bool {
+	return Compare(lhs, rhs) == -1
+}
+
+// Compare provides a total ordering for Value (so that they can be
+// sorted, even if they are of different types). The result will be 0 if
+// lhs==rhs, -1 if lhs < rhs, and +1 if lhs > rhs.
+func Compare(lhs, rhs Value) int {
+	return CompareUsing(HeapAllocator, lhs, rhs)
+}
+
+// CompareUsing uses the provided allocator and provides a total
+// ordering for Value (so that they can be sorted, even if they
+// are of different types). The result will be 0 if lhs==rhs, -1
+// if lhs < rhs, and +1 if lhs > rhs.
+func CompareUsing(a Allocator, lhs, rhs Value) int {
+	if lhs.IsFloat() {
+		if !rhs.IsFloat() {
+			// Extra: compare floats and ints numerically.
+			if rhs.IsInt() {
+				return FloatCompare(lhs.AsFloat(), float64(rhs.AsInt()))
+			}
+			return -1
+		}
+		return FloatCompare(lhs.AsFloat(), rhs.AsFloat())
+	} else if rhs.IsFloat() {
+		// Extra: compare floats and ints numerically.
+		if lhs.IsInt() {
+			return FloatCompare(float64(lhs.AsInt()), rhs.AsFloat())
+		}
+		return 1
+	}
+
+	if lhs.IsInt() {
+		if !rhs.IsInt() {
+			return -1
+		}
+		return IntCompare(lhs.AsInt(), rhs.AsInt())
+	} else if rhs.IsInt() {
+		return 1
+	}
+
+	if lhs.IsString() {
+		if !rhs.IsString() {
+			return -1
+		}
+		return strings.Compare(lhs.AsString(), rhs.AsString())
+	} else if rhs.IsString() {
+		return 1
+	}
+
+	if lhs.IsBool() {
+		if !rhs.IsBool() {
+			return -1
+		}
+		return BoolCompare(lhs.AsBool(), rhs.AsBool())
+	} else if rhs.IsBool() {
+		return 1
+	}
+
+	if lhs.IsList() {
+		if !rhs.IsList() {
+			return -1
+		}
+		lhsList := lhs.AsListUsing(a)
+		defer a.Free(lhsList)
+		rhsList := rhs.AsListUsing(a)
+		defer a.Free(rhsList)
+		return ListCompareUsing(a, lhsList, rhsList)
+	} else if rhs.IsList() {
+		return 1
+	}
+	if lhs.IsMap() {
+		if !rhs.IsMap() {
+			return -1
+		}
+		lhsMap := lhs.AsMapUsing(a)
+		defer a.Free(lhsMap)
+		rhsMap := rhs.AsMapUsing(a)
+		defer a.Free(rhsMap)
+		return MapCompareUsing(a, lhsMap, rhsMap)
+	} else if rhs.IsMap() {
+		return 1
+	}
+	if lhs.IsNull() {
+		if !rhs.IsNull() {
+			return -1
+		}
+		return 0
+	} else if rhs.IsNull() {
+		return 1
+	}
+
+	// Invalid Value-- nothing is set.
+	return 0
+}
diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/valuereflect.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/valuereflect.go
new file mode 100644
index 00000000000..05e70debaef
--- /dev/null
+++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/valuereflect.go
@@ -0,0 +1,294 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"encoding/base64"
+	"fmt"
+	"reflect"
+)
+
+// NewValueReflect creates a Value backed by an "interface{}" type,
+// typically a structured object in the Kubernetes world that is exposed using reflection.
+// The provided "interface{}" value must be a pointer so that the value can be modified via reflection.
+// The provided "interface{}" may contain structs and types that are converted to Values
+// by the json.Marshaler interface.
+func NewValueReflect(value interface{}) (Value, error) {
+	if value == nil {
+		return NewValueInterface(nil), nil
+	}
+	v := reflect.ValueOf(value)
+	if v.Kind() != reflect.Ptr {
+		// The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible.
+		return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer")
+	}
+	return wrapValueReflect(v, nil, nil)
+}
+
+// wrapValueReflect wraps the provided reflect.Value as a value. If parent in the data tree is a map, parentMap
+// and parentMapKey must be provided so that the returned value may be set and deleted.
+func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) {
+	val := HeapAllocator.allocValueReflect()
+	return val.reuse(value, nil, parentMap, parentMapKey)
+}
+
+// mustWrapValueReflect wraps the provided reflect.Value as a value, and panics if there is an error.
If parent in the data +// tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. +func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value { + v, err := wrapValueReflect(value, parentMap, parentMapKey) + if err != nil { + panic(err) + } + return v +} + +// the value interface doesn't care about the type for value.IsNull, so we can use a constant +var nilType = reflect.TypeOf(&struct{}{}) + +// reuse replaces the value of the valueReflect. If parent in the data tree is a map, parentMap and parentMapKey +// must be provided so that the returned value may be set and deleted. +func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) { + if cacheEntry == nil { + cacheEntry = TypeReflectEntryOf(value.Type()) + } + if cacheEntry.CanConvertToUnstructured() { + u, err := cacheEntry.ToUnstructured(value) + if err != nil { + return nil, err + } + if u == nil { + value = reflect.Zero(nilType) + } else { + value = reflect.ValueOf(u) + } + } + r.Value = dereference(value) + r.ParentMap = parentMap + r.ParentMapKey = parentMapKey + r.kind = kind(r.Value) + return r, nil +} + +// mustReuse replaces the value of the valueReflect and panics if there is an error. If parent in the data tree is a +// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. +func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value { + v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey) + if err != nil { + panic(err) + } + return v +} + +func dereference(val reflect.Value) reflect.Value { + kind := val.Kind() + if (kind == reflect.Interface || kind == reflect.Ptr) && !safeIsNil(val) { + return val.Elem() + } + return val +} + +type valueReflect struct { + ParentMap *reflect.Value + ParentMapKey *reflect.Value + Value reflect.Value + kind reflectType +} + +func (r valueReflect) IsMap() bool { + return r.kind == mapType || r.kind == structMapType +} + +func (r valueReflect) IsList() bool { + return r.kind == listType +} + +func (r valueReflect) IsBool() bool { + return r.kind == boolType +} + +func (r valueReflect) IsInt() bool { + return r.kind == intType || r.kind == uintType +} + +func (r valueReflect) IsFloat() bool { + return r.kind == floatType +} + +func (r valueReflect) IsString() bool { + return r.kind == stringType || r.kind == byteStringType +} + +func (r valueReflect) IsNull() bool { + return r.kind == nullType +} + +type reflectType = int + +const ( + mapType = iota + structMapType + listType + intType + uintType + floatType + stringType + byteStringType + boolType + nullType +) + +func kind(v reflect.Value) reflectType { + typ := v.Type() + rk := typ.Kind() + switch rk { + case reflect.Map: + if v.IsNil() { + return nullType + } + return mapType + case reflect.Struct: + return structMapType + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + return intType + case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8: + // Uint64 deliberately excluded, see valueUnstructured.Int. 
+ return uintType + case reflect.Float64, reflect.Float32: + return floatType + case reflect.String: + return stringType + case reflect.Bool: + return boolType + case reflect.Slice: + if v.IsNil() { + return nullType + } + elemKind := typ.Elem().Kind() + if elemKind == reflect.Uint8 { + return byteStringType + } + return listType + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface: + if v.IsNil() { + return nullType + } + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + default: + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + } +} + +// TODO find a cleaner way to avoid panics from reflect.IsNil() +func safeIsNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return v.IsNil() + } + return false +} + +func (r valueReflect) AsMap() Map { + return r.AsMapUsing(HeapAllocator) +} + +func (r valueReflect) AsMapUsing(a Allocator) Map { + switch r.kind { + case structMapType: + v := a.allocStructReflect() + v.valueReflect = r + return v + case mapType: + v := a.allocMapReflect() + v.valueReflect = r + return v + default: + panic("value is not a map or struct") + } +} + +func (r valueReflect) AsList() List { + return r.AsListUsing(HeapAllocator) +} + +func (r valueReflect) AsListUsing(a Allocator) List { + if r.IsList() { + v := a.allocListReflect() + v.Value = r.Value + return v + } + panic("value is not a list") +} + +func (r valueReflect) AsBool() bool { + if r.IsBool() { + return r.Value.Bool() + } + panic("value is not a bool") +} + +func (r valueReflect) AsInt() int64 { + if r.kind == intType { + return r.Value.Int() + } + if r.kind == uintType { + return int64(r.Value.Uint()) + } + + panic("value is not an int") +} + +func (r valueReflect) AsFloat() float64 { + if r.IsFloat() { + return r.Value.Float() + } + panic("value is not a float") +} + +func (r valueReflect) AsString() string { + switch r.kind { + case stringType: + return r.Value.String() + case byteStringType: + return base64.StdEncoding.EncodeToString(r.Value.Bytes()) + } + panic("value is not a string") +} + +func (r valueReflect) Unstructured() interface{} { + val := r.Value + switch { + case r.IsNull(): + return nil + case val.Kind() == reflect.Struct: + return structReflect{r}.Unstructured() + case val.Kind() == reflect.Map: + return mapReflect{valueReflect: r}.Unstructured() + case r.IsList(): + return listReflect{r.Value}.Unstructured() + case r.IsString(): + return r.AsString() + case r.IsInt(): + return r.AsInt() + case r.IsBool(): + return r.AsBool() + case r.IsFloat(): + return r.AsFloat() + default: + panic(fmt.Sprintf("value of type %s is not a supported by value reflector", val.Type())) + } +} diff --git a/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/valueunstructured.go b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/valueunstructured.go new file mode 100644 index 00000000000..ac5a926285d --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/structured-merge-diff/v6/value/valueunstructured.go @@ -0,0 +1,178 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "fmt" +) + +// NewValueInterface creates a Value backed by an "interface{}" type, +// typically an unstructured object in Kubernetes world. +// interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types, +// string or boolean. Nested interface{} must also be one of these types. +func NewValueInterface(v interface{}) Value { + return Value(HeapAllocator.allocValueUnstructured().reuse(v)) +} + +type valueUnstructured struct { + Value interface{} +} + +// reuse replaces the value of the valueUnstructured. +func (vi *valueUnstructured) reuse(value interface{}) Value { + vi.Value = value + return vi +} + +func (v valueUnstructured) IsMap() bool { + if _, ok := v.Value.(map[string]interface{}); ok { + return true + } + if _, ok := v.Value.(map[interface{}]interface{}); ok { + return true + } + return false +} + +func (v valueUnstructured) AsMap() Map { + return v.AsMapUsing(HeapAllocator) +} + +func (v valueUnstructured) AsMapUsing(_ Allocator) Map { + if v.Value == nil { + panic("invalid nil") + } + switch t := v.Value.(type) { + case map[string]interface{}: + return mapUnstructuredString(t) + case map[interface{}]interface{}: + return mapUnstructuredInterface(t) + } + panic(fmt.Errorf("not a map: %#v", v)) +} + +func (v valueUnstructured) IsList() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.([]interface{}) + return ok +} + +func (v valueUnstructured) AsList() List { + return v.AsListUsing(HeapAllocator) +} + +func (v valueUnstructured) AsListUsing(_ Allocator) List { + return listUnstructured(v.Value.([]interface{})) +} + +func (v valueUnstructured) IsFloat() bool { + if v.Value == nil { + return false + } else if _, ok := v.Value.(float64); ok { + return true + } else if _, ok := v.Value.(float32); ok { + return true + } + return false +} + +func (v valueUnstructured) AsFloat() float64 { + if f, ok := v.Value.(float32); ok { + return float64(f) + } + return v.Value.(float64) +} + +func (v valueUnstructured) IsInt() bool { + if v.Value == nil { + return false + } else if _, ok := v.Value.(int); ok { + return true + } else if _, ok := v.Value.(int8); ok { + return true + } else if _, ok := v.Value.(int16); ok { + return true + } else if _, ok := v.Value.(int32); ok { + return true + } else if _, ok := v.Value.(int64); ok { + return true + } else if _, ok := v.Value.(uint); ok { + return true + } else if _, ok := v.Value.(uint8); ok { + return true + } else if _, ok := v.Value.(uint16); ok { + return true + } else if _, ok := v.Value.(uint32); ok { + return true + } + return false +} + +func (v valueUnstructured) AsInt() int64 { + if i, ok := v.Value.(int); ok { + return int64(i) + } else if i, ok := v.Value.(int8); ok { + return int64(i) + } else if i, ok := v.Value.(int16); ok { + return int64(i) + } else if i, ok := v.Value.(int32); ok { + return int64(i) + } else if i, ok := v.Value.(uint); ok { + return int64(i) + } else if i, ok := v.Value.(uint8); ok { + return int64(i) + } else if i, ok := v.Value.(uint16); ok { + return int64(i) + } else if i, ok := v.Value.(uint32); ok { + 
return int64(i) + } + return v.Value.(int64) +} + +func (v valueUnstructured) IsString() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.(string) + return ok +} + +func (v valueUnstructured) AsString() string { + return v.Value.(string) +} + +func (v valueUnstructured) IsBool() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.(bool) + return ok +} + +func (v valueUnstructured) AsBool() bool { + return v.Value.(bool) +} + +func (v valueUnstructured) IsNull() bool { + return v.Value == nil +} + +func (v valueUnstructured) Unstructured() interface{} { + return v.Value +} diff --git a/e2e/vendor/sigs.k8s.io/yaml/.travis.yml b/e2e/vendor/sigs.k8s.io/yaml/.travis.yml deleted file mode 100644 index 54ed8f9cb96..00000000000 --- a/e2e/vendor/sigs.k8s.io/yaml/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: arm64 -dist: focal -go: 1.15.x -script: - - diff -u <(echo -n) <(gofmt -d *.go) - - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) - - GO111MODULE=on go vet . - - GO111MODULE=on go test -v -race ./... - - git diff --exit-code -install: - - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE deleted file mode 100644 index 8dada3edaf5..00000000000 --- a/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS deleted file mode 100644 index 73be0a3a9bd..00000000000 --- a/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS +++ /dev/null @@ -1,24 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- dims -- jpbetz -- smarterclayton -- deads2k -- sttts -- liggitt -- natasha41575 -- knverey -reviewers: -- dims -- thockin -- jpbetz -- smarterclayton -- deads2k -- derekwaynecarr -- mikedanese -- liggitt -- sttts -- tallclair -labels: -- sig/api-machinery diff --git a/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md index 53f4139dc31..9a8f1e67828 100644 --- a/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md +++ b/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md @@ -1,143 +1,71 @@ -# go-yaml fork +# goyaml.v2 -This package is a fork of the go-yaml library and is intended solely for consumption -by kubernetes projects. 
In this fork, we plan to support only critical changes required for -kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests -should be made in the upstream go-yaml library, and we will reject such changes in this fork -unless we are pulling them from upstream. +This package provides type and function aliases for the `go.yaml.in/yaml/v2` package (which is compatible with `gopkg.in/yaml.v2`). -This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 +## Purpose -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` +The purpose of this package is to: -This example will generate the following output: +1. Provide a transition path for users migrating from the sigs.k8s.io/yaml package to direct usage of go.yaml.in/yaml/v2 +2. Maintain compatibility with existing code while encouraging migration to the upstream package +3. Reduce maintenance overhead by delegating to the upstream implementation +## Usage + +Instead of importing this package directly, you should migrate to using `go.yaml.in/yaml/v2` directly: + +```go +// Old way +import "sigs.k8s.io/yaml/goyaml.v2" + +// Recommended way +import "go.yaml.in/yaml/v2" ``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! 
b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` +## Available Types and Functions + +All public types and functions from `go.yaml.in/yaml/v2` are available through this package: + +### Types + +- `MapSlice` - Encodes and decodes as a YAML map with preserved key order +- `MapItem` - An item in a MapSlice +- `Unmarshaler` - Interface for custom unmarshaling behavior +- `Marshaler` - Interface for custom marshaling behavior +- `IsZeroer` - Interface to check if an object is zero +- `Decoder` - Reads and decodes YAML values from an input stream +- `Encoder` - Writes YAML values to an output stream +- `TypeError` - Error returned by Unmarshal for decoding issues + +### Functions + +- `Unmarshal` - Decodes YAML data into a Go value +- `UnmarshalStrict` - Like Unmarshal but errors on unknown fields +- `Marshal` - Serializes a Go value into YAML +- `NewDecoder` - Creates a new Decoder +- `NewEncoder` - Creates a new Encoder +- `FutureLineWrap` - Controls line wrapping behavior + +## Migration Guide + +To migrate from this package to `go.yaml.in/yaml/v2`: + +1. Update your import statements: + ```go + // From + import "sigs.k8s.io/yaml/goyaml.v2" + + // To + import "go.yaml.in/yaml/v2" + ``` + +2. No code changes should be necessary as the API is identical + +3. Update your go.mod file to include the dependency: + ``` + require go.yaml.in/yaml/v2 v2.4.2 + ``` + +## Deprecation Notice + +All types and functions in this package are marked as deprecated. You should migrate to using `go.yaml.in/yaml/v2` directly. diff --git a/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go b/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go new file mode 100644 index 00000000000..8c82bc2cb94 --- /dev/null +++ b/e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go @@ -0,0 +1,85 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + gopkg_yaml "go.yaml.in/yaml/v2" +) + +// Type aliases for public types from go.yaml.in/yaml/v2 +type ( + // MapSlice encodes and decodes as a YAML map. + // The order of keys is preserved when encoding and decoding. + // Deprecated: Use go.yaml.in/yaml/v2.MapSlice directly. + MapSlice = gopkg_yaml.MapSlice + + // MapItem is an item in a MapSlice. + // Deprecated: Use go.yaml.in/yaml/v2.MapItem directly. + MapItem = gopkg_yaml.MapItem + + // Unmarshaler is implemented by types to customize their behavior when being unmarshaled from a YAML document. + // Deprecated: Use go.yaml.in/yaml/v2.Unmarshaler directly. + Unmarshaler = gopkg_yaml.Unmarshaler + + // Marshaler is implemented by types to customize their behavior when being marshaled into a YAML document. + // Deprecated: Use go.yaml.in/yaml/v2.Marshaler directly. + Marshaler = gopkg_yaml.Marshaler + + // IsZeroer is used to check whether an object is zero to determine whether it should be omitted when + // marshaling with the omitempty flag. One notable implementation is time.Time. + // Deprecated: Use go.yaml.in/yaml/v2.IsZeroer directly. 
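To make the migration guide above concrete, here is a small sketch using only the aliased API the README documents; because the aliases are identical to the upstream types, swapping the import path is the entire change:

```go
package main

import (
	"fmt"
	"log"

	yaml "go.yaml.in/yaml/v2" // previously: sigs.k8s.io/yaml/goyaml.v2
)

func main() {
	// MapSlice preserves YAML key order, exactly as the deprecated alias did.
	var ms yaml.MapSlice
	if err := yaml.Unmarshal([]byte("b: 2\na: 1\n"), &ms); err != nil {
		log.Fatalf("error: %v", err)
	}
	for _, item := range ms {
		fmt.Printf("%v: %v\n", item.Key, item.Value) // b: 2, then a: 1
	}
}
```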
+ IsZeroer = gopkg_yaml.IsZeroer + + // Decoder reads and decodes YAML values from an input stream. + // Deprecated: Use go.yaml.in/yaml/v2.Decoder directly. + Decoder = gopkg_yaml.Decoder + + // Encoder writes YAML values to an output stream. + // Deprecated: Use go.yaml.in/yaml/v2.Encoder directly. + Encoder = gopkg_yaml.Encoder + + // TypeError is returned by Unmarshal when one or more fields in the YAML document cannot be properly decoded. + // Deprecated: Use go.yaml.in/yaml/v2.TypeError directly. + TypeError = gopkg_yaml.TypeError +) + +// Function aliases for public functions from go.yaml.in/yaml/v2 +var ( + // Unmarshal decodes the first document found within the in byte slice and assigns decoded values into the out value. + // Deprecated: Use go.yaml.in/yaml/v2.Unmarshal directly. + Unmarshal = gopkg_yaml.Unmarshal + + // UnmarshalStrict is like Unmarshal except that any fields that are found in the data that do not have corresponding struct members will result in an error. + // Deprecated: Use go.yaml.in/yaml/v2.UnmarshalStrict directly. + UnmarshalStrict = gopkg_yaml.UnmarshalStrict + + // Marshal serializes the value provided into a YAML document. + // Deprecated: Use go.yaml.in/yaml/v2.Marshal directly. + Marshal = gopkg_yaml.Marshal + + // NewDecoder returns a new decoder that reads from r. + // Deprecated: Use go.yaml.in/yaml/v2.NewDecoder directly. + NewDecoder = gopkg_yaml.NewDecoder + + // NewEncoder returns a new encoder that writes to w. + // Deprecated: Use go.yaml.in/yaml/v2.NewEncoder directly. + NewEncoder = gopkg_yaml.NewEncoder + + // FutureLineWrap globally disables line wrapping when encoding long strings. + // Deprecated: Use go.yaml.in/yaml/v2.FutureLineWrap directly. + FutureLineWrap = gopkg_yaml.FutureLineWrap +) diff --git a/e2e/vendor/sigs.k8s.io/yaml/yaml.go b/e2e/vendor/sigs.k8s.io/yaml/yaml.go index fc10246bdb2..aa01acd45d9 100644 --- a/e2e/vendor/sigs.k8s.io/yaml/yaml.go +++ b/e2e/vendor/sigs.k8s.io/yaml/yaml.go @@ -24,7 +24,7 @@ import ( "reflect" "strconv" - "sigs.k8s.io/yaml/goyaml.v2" + "go.yaml.in/yaml/v2" ) // Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference) @@ -92,7 +92,7 @@ func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error { d = opt(d) } if err := d.Decode(&obj); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) + return fmt.Errorf("while decoding JSON: %w", err) } return nil } @@ -417,3 +417,10 @@ func jsonToYAMLValue(j interface{}) interface{} { } return j } + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/e2e/vendor/sigs.k8s.io/yaml/yaml_go110.go b/e2e/vendor/sigs.k8s.io/yaml/yaml_go110.go deleted file mode 100644 index 94abc1719dc..00000000000 --- a/e2e/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ /dev/null @@ -1,31 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -//go:build go1.10 -// +build go1.10 - -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
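The `DisallowUnknownFields` helper relocated into yaml.go in the hunk above is a `JSONOpt`: passing it to `sigs.k8s.io/yaml.Unmarshal` turns unknown YAML fields into a decode error instead of letting them be silently dropped. A short sketch:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type Config struct {
	Name string `json:"name"`
}

func main() {
	data := []byte("name: demo\nextra: true\n")

	var c Config
	// Default behavior: the unknown "extra" field is dropped, decoding succeeds.
	_ = yaml.Unmarshal(data, &c)

	// Strict behavior: DisallowUnknownFields makes the same input fail.
	if err := yaml.Unmarshal(data, &c, yaml.DisallowUnknownFields); err != nil {
		fmt.Println("strict decode failed:", err)
	}
}
```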
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import "encoding/json"
-
-// DisallowUnknownFields configures the JSON decoder to error out if unknown
-// fields come along, instead of dropping them by default.
-func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
-	d.DisallowUnknownFields()
-	return d
-}

From ebe3d5cea9695e1dfc1bbdab7c882bbe7839de9b Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Fri, 10 Oct 2025 13:21:01 +0200
Subject: [PATCH 113/157] rebase: update all k8s.io modules to x.34.1 in /e2e

Signed-off-by: Niels de Vos
---
 e2e/go.mod | 21 +-
 e2e/go.sum | 33 +-
 .../v1beta1/generated.pb.go | 5038 ++++--
 .../v1beta1/generated.proto | 301 +
 .../admissionregistration/v1beta1/register.go | 4 +
 .../admissionregistration/v1beta1/types.go | 331 +-
 .../v1beta1/types_swagger_doc_generated.go | 95 +
 .../v1beta1/zz_generated.deepcopy.go | 254 +-
 .../zz_generated.prerelease-lifecycle.go | 72 +
 e2e/vendor/k8s.io/api/apps/v1/generated.proto | 2 +-
 e2e/vendor/k8s.io/api/apps/v1/types.go | 2 +-
 .../apps/v1/types_swagger_doc_generated.go | 2 +-
 .../k8s.io/api/apps/v1beta1/generated.proto | 3 +
 e2e/vendor/k8s.io/api/apps/v1beta1/types.go | 4 +
 .../k8s.io/api/apps/v1beta2/generated.proto | 5 +-
 e2e/vendor/k8s.io/api/apps/v1beta2/types.go | 6 +-
 .../v1beta2/types_swagger_doc_generated.go | 2 +-
 .../api/authorization/v1/generated.proto | 6 -
 .../k8s.io/api/authorization/v1/types.go | 6 -
 .../v1/types_swagger_doc_generated.go | 4 +-
 .../k8s.io/api/autoscaling/v1/generated.proto | 3 +
 e2e/vendor/k8s.io/api/autoscaling/v1/types.go | 4 +
 .../k8s.io/api/batch/v1/generated.proto | 7 +-
 e2e/vendor/k8s.io/api/batch/v1/types.go | 7 +-
 .../batch/v1/types_swagger_doc_generated.go | 6 +-
 .../api/certificates/v1/generated.proto | 7 +
 .../k8s.io/api/certificates/v1/types.go | 7 +
 .../api/certificates/v1alpha1/generated.pb.go | 1511 +-
 .../api/certificates/v1alpha1/generated.proto | 205 +
 .../api/certificates/v1alpha1/register.go | 2 +
 .../k8s.io/api/certificates/v1alpha1/types.go | 231 +
 .../v1alpha1/types_swagger_doc_generated.go | 52 +
 .../v1alpha1/zz_generated.deepcopy.go | 128 +
 .../zz_generated.prerelease-lifecycle.go | 36 +
 .../api/certificates/v1beta1/generated.proto | 7 +
 .../k8s.io/api/certificates/v1beta1/types.go | 7 +
 e2e/vendor/k8s.io/api/core/v1/generated.pb.go | 5809 ++++---
 e2e/vendor/k8s.io/api/core/v1/generated.proto | 305 +-
 e2e/vendor/k8s.io/api/core/v1/types.go | 320 +-
 .../core/v1/types_swagger_doc_generated.go | 149 +-
 .../api/core/v1/zz_generated.deepcopy.go | 155 +
 .../k8s.io/api/extensions/v1beta1/doc.go | 2 +
 .../api/extensions/v1beta1/generated.proto | 5 +-
 .../k8s.io/api/extensions/v1beta1/types.go | 6 +-
 .../v1beta1/types_swagger_doc_generated.go | 2 +-
 .../v1beta1/zz_generated.validations.go | 78 +
 .../k8s.io/api/networking/v1/generated.proto | 7 +-
 e2e/vendor/k8s.io/api/networking/v1/types.go | 7 +-
 .../v1/types_swagger_doc_generated.go | 2 +-
 .../api/networking/v1alpha1/generated.pb.go | 1929 ---
 .../api/networking/v1alpha1/generated.proto | 142 -
 .../k8s.io/api/networking/v1alpha1/types.go | 154 -
 .../v1alpha1/types_swagger_doc_generated.go | 110 -
 .../networking/v1alpha1/well_known_labels.go | 33 -
.../v1alpha1/zz_generated.deepcopy.go | 229 - .../zz_generated.prerelease-lifecycle.go | 94 - .../resource/v1/devicetaint.go} | 26 +- .../v1alpha1 => resource/v1}/doc.go | 9 +- .../k8s.io/api/resource/v1/generated.pb.go | 12777 ++++++++++++++++ .../k8s.io/api/resource/v1/generated.proto | 1589 ++ .../v1alpha1 => resource/v1}/register.go | 46 +- e2e/vendor/k8s.io/api/resource/v1/types.go | 1873 +++ .../v1/types_swagger_doc_generated.go | 510 + .../api/resource/v1/zz_generated.deepcopy.go | 1257 ++ .../v1/zz_generated.prerelease-lifecycle.go | 70 + .../api/resource/v1alpha3/devicetaint.go | 35 + .../api/resource/v1alpha3/generated.pb.go | 11648 +------------- .../api/resource/v1alpha3/generated.proto | 1160 +- .../k8s.io/api/resource/v1alpha3/register.go | 8 - .../k8s.io/api/resource/v1alpha3/types.go | 1534 +- .../v1alpha3/types_swagger_doc_generated.go | 395 - .../v1alpha3/zz_generated.deepcopy.go | 1099 +- .../zz_generated.prerelease-lifecycle.go | 196 - .../api/resource/v1beta1/generated.pb.go | 2280 ++- .../api/resource/v1beta1/generated.proto | 313 +- .../k8s.io/api/resource/v1beta1/types.go | 331 +- .../v1beta1/types_swagger_doc_generated.go | 102 +- .../resource/v1beta1/zz_generated.deepcopy.go | 165 + .../api/resource/v1beta2/generated.pb.go | 2520 ++- .../api/resource/v1beta2/generated.proto | 313 +- .../k8s.io/api/resource/v1beta2/types.go | 331 +- .../v1beta2/types_swagger_doc_generated.go | 104 +- .../resource/v1beta2/zz_generated.deepcopy.go | 165 + .../k8s.io/api/storage/v1/generated.pb.go | 828 +- .../k8s.io/api/storage/v1/generated.proto | 47 +- e2e/vendor/k8s.io/api/storage/v1/register.go | 3 + e2e/vendor/k8s.io/api/storage/v1/types.go | 59 +- .../storage/v1/types_swagger_doc_generated.go | 27 +- .../api/storage/v1/zz_generated.deepcopy.go | 66 + .../v1/zz_generated.prerelease-lifecycle.go | 12 + .../k8s.io/api/storage/v1alpha1/types.go | 4 + .../zz_generated.prerelease-lifecycle.go | 12 + .../api/storage/v1beta1/generated.proto | 7 +- .../k8s.io/api/storage/v1beta1/types.go | 11 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../zz_generated.prerelease-lifecycle.go | 12 + .../pkg/apis/apiextensions/v1/conversion.go | 2 +- .../pkg/apis/apiextensions/v1/defaults.go | 4 +- .../pkg/features/kube_features.go | 14 +- .../internalversion/validation/validation.go | 76 - .../v1/mutatingwebhookconfiguration.go | 17 + .../v1/validatingadmissionpolicy.go | 17 + .../v1/validatingadmissionpolicybinding.go | 17 + .../v1/validatingwebhookconfiguration.go | 17 + .../v1alpha1/mutatingadmissionpolicy.go | 17 + .../mutatingadmissionpolicybinding.go | 17 + .../v1alpha1/validatingadmissionpolicy.go | 17 + .../validatingadmissionpolicybinding.go | 17 + .../v1beta1/applyconfiguration.go | 39 + .../v1beta1/jsonpatch.go | 39 + .../v1beta1/mutatingadmissionpolicy.go | 270 + .../v1beta1/mutatingadmissionpolicybinding.go | 270 + .../mutatingadmissionpolicybindingspec.go | 57 + .../v1beta1/mutatingadmissionpolicyspec.go | 113 + .../v1beta1/mutatingwebhook.go | 27 +- .../v1beta1/mutatingwebhookconfiguration.go | 17 + .../admissionregistration/v1beta1/mutation.go | 61 + .../v1beta1/validatingadmissionpolicy.go | 17 + .../validatingadmissionpolicybinding.go | 17 + .../v1beta1/validatingwebhookconfiguration.go | 17 + .../v1alpha1/storageversion.go | 17 + .../apps/v1/controllerrevision.go | 17 + .../applyconfigurations/apps/v1/daemonset.go | 17 + .../applyconfigurations/apps/v1/deployment.go | 17 + .../applyconfigurations/apps/v1/replicaset.go | 17 + .../apps/v1/statefulset.go | 17 + 
.../apps/v1beta1/controllerrevision.go | 17 + .../apps/v1beta1/deployment.go | 17 + .../apps/v1beta1/statefulset.go | 17 + .../apps/v1beta2/controllerrevision.go | 17 + .../apps/v1beta2/daemonset.go | 17 + .../apps/v1beta2/deployment.go | 17 + .../apps/v1beta2/replicaset.go | 17 + .../applyconfigurations/apps/v1beta2/scale.go | 17 + .../apps/v1beta2/statefulset.go | 17 + .../autoscaling/v1/horizontalpodautoscaler.go | 17 + .../autoscaling/v1/scale.go | 17 + .../autoscaling/v2/horizontalpodautoscaler.go | 17 + .../v2beta1/horizontalpodautoscaler.go | 17 + .../v2beta2/horizontalpodautoscaler.go | 17 + .../applyconfigurations/batch/v1/cronjob.go | 17 + .../applyconfigurations/batch/v1/job.go | 17 + .../batch/v1/jobtemplatespec.go | 6 + .../batch/v1beta1/cronjob.go | 17 + .../batch/v1beta1/jobtemplatespec.go | 6 + .../v1/certificatesigningrequest.go | 17 + .../v1alpha1/clustertrustbundle.go | 17 + .../v1alpha1/podcertificaterequest.go} | 109 +- .../v1alpha1/podcertificaterequestspec.go | 128 + .../v1alpha1/podcertificaterequeststatus.go | 85 + .../v1beta1/certificatesigningrequest.go | 17 + .../v1beta1/clustertrustbundle.go | 17 + .../coordination/v1/lease.go | 17 + .../coordination/v1alpha2/leasecandidate.go | 17 + .../coordination/v1beta1/lease.go | 17 + .../coordination/v1beta1/leasecandidate.go | 17 + .../core/v1/componentstatus.go | 17 + .../applyconfigurations/core/v1/configmap.go | 17 + .../applyconfigurations/core/v1/container.go | 14 + .../v1/containerextendedresourcerequest.go | 57 + .../core/v1/containerrestartrule.go | 52 + .../v1/containerrestartruleonexitcodes.go | 54 + .../applyconfigurations/core/v1/endpoints.go | 17 + .../core/v1/envvarsource.go | 9 + .../core/v1/ephemeralcontainer.go | 13 + .../core/v1/ephemeralcontainercommon.go | 14 + .../applyconfigurations/core/v1/event.go | 17 + .../core/v1/filekeyselector.go | 66 + .../applyconfigurations/core/v1/limitrange.go | 17 + .../applyconfigurations/core/v1/namespace.go | 17 + .../applyconfigurations/core/v1/node.go | 17 + .../core/v1/persistentvolume.go | 17 + .../core/v1/persistentvolumeclaim.go | 17 + .../core/v1/persistentvolumeclaimtemplate.go | 6 + .../applyconfigurations/core/v1/pod.go | 17 + .../core/v1/podcertificateprojection.go | 84 + .../core/v1/podextendedresourceclaimstatus.go | 53 + .../applyconfigurations/core/v1/podspec.go | 9 + .../applyconfigurations/core/v1/podstatus.go | 43 +- .../core/v1/podtemplate.go | 17 + .../core/v1/podtemplatespec.go | 6 + .../core/v1/replicationcontroller.go | 17 + .../core/v1/resourcequota.go | 17 + .../applyconfigurations/core/v1/secret.go | 17 + .../applyconfigurations/core/v1/service.go | 17 + .../core/v1/serviceaccount.go | 17 + .../core/v1/volumeprojection.go | 9 + .../discovery/v1/endpointslice.go | 17 + .../discovery/v1beta1/endpointslice.go | 17 + .../applyconfigurations/events/v1/event.go | 17 + .../events/v1beta1/event.go | 17 + .../extensions/v1beta1/daemonset.go | 17 + .../extensions/v1beta1/deployment.go | 17 + .../extensions/v1beta1/ingress.go | 17 + .../extensions/v1beta1/networkpolicy.go | 17 + .../extensions/v1beta1/replicaset.go | 17 + .../extensions/v1beta1/scale.go | 17 + .../flowcontrol/v1/flowschema.go | 17 + .../v1/prioritylevelconfiguration.go | 17 + .../flowcontrol/v1beta1/flowschema.go | 17 + .../v1beta1/prioritylevelconfiguration.go | 17 + .../flowcontrol/v1beta2/flowschema.go | 17 + .../v1beta2/prioritylevelconfiguration.go | 17 + .../flowcontrol/v1beta3/flowschema.go | 17 + .../v1beta3/prioritylevelconfiguration.go | 17 + 
.../applyconfigurations/internal/internal.go | 1057 +- .../meta/v1/deleteoptions.go | 11 + .../applyconfigurations/meta/v1/objectmeta.go | 5 + .../applyconfigurations/meta/v1/typemeta.go | 10 + .../meta/v1/unstructured.go | 2 +- .../networking/v1/ingress.go | 17 + .../networking/v1/ingressclass.go | 17 + .../networking/v1/ipaddress.go | 17 + .../networking/v1/networkpolicy.go | 17 + .../networking/v1/servicecidr.go | 17 + .../networking/v1alpha1/ipaddress.go | 253 - .../networking/v1alpha1/ipaddressspec.go | 39 - .../networking/v1alpha1/parentreference.go | 66 - .../networking/v1alpha1/servicecidrspec.go | 41 - .../networking/v1alpha1/servicecidrstatus.go | 48 - .../networking/v1beta1/ingress.go | 17 + .../networking/v1beta1/ingressclass.go | 17 + .../networking/v1beta1/ipaddress.go | 17 + .../networking/v1beta1/servicecidr.go | 17 + .../node/v1/runtimeclass.go | 17 + .../node/v1alpha1/runtimeclass.go | 17 + .../node/v1beta1/runtimeclass.go | 17 + .../applyconfigurations/policy/v1/eviction.go | 17 + .../policy/v1/poddisruptionbudget.go | 17 + .../policy/v1beta1/eviction.go | 17 + .../policy/v1beta1/poddisruptionbudget.go | 17 + .../rbac/v1/clusterrole.go | 17 + .../rbac/v1/clusterrolebinding.go | 17 + .../applyconfigurations/rbac/v1/role.go | 17 + .../rbac/v1/rolebinding.go | 17 + .../rbac/v1alpha1/clusterrole.go | 17 + .../rbac/v1alpha1/clusterrolebinding.go | 17 + .../applyconfigurations/rbac/v1alpha1/role.go | 17 + .../rbac/v1alpha1/rolebinding.go | 17 + .../rbac/v1beta1/clusterrole.go | 17 + .../rbac/v1beta1/clusterrolebinding.go | 17 + .../applyconfigurations/rbac/v1beta1/role.go | 17 + .../rbac/v1beta1/rolebinding.go | 17 + .../{v1alpha3 => v1}/allocateddevicestatus.go | 17 +- .../{v1alpha3 => v1}/allocationresult.go | 20 +- .../resource/v1/capacityrequestpolicy.go | 63 + .../resource/v1/capacityrequestpolicyrange.go | 61 + .../resource/v1/capacityrequirements.go | 50 + .../resource/v1/celdeviceselector.go | 39 + .../resource/{v1alpha3 => v1}/counter.go | 2 +- .../resource/{v1alpha3 => v1}/counterset.go | 2 +- .../applyconfigurations/resource/v1/device.go | 169 + .../deviceallocationconfiguration.go | 10 +- .../deviceallocationresult.go | 2 +- .../{v1alpha3 => v1}/deviceattribute.go | 2 +- .../resource/v1/devicecapacity.go | 52 + .../resource/{v1alpha3 => v1}/deviceclaim.go | 2 +- .../deviceclaimconfiguration.go | 2 +- .../resource/{v1alpha3 => v1}/deviceclass.go | 51 +- .../deviceclassconfiguration.go | 2 +- .../{v1alpha3 => v1}/deviceclassspec.go | 15 +- .../{v1alpha3 => v1}/deviceconfiguration.go | 2 +- .../{v1alpha3 => v1}/deviceconstraint.go | 19 +- .../devicecounterconsumption.go | 2 +- .../resource/v1/devicerequest.go | 62 + .../devicerequestallocationresult.go | 66 +- .../resource/v1/deviceselector.go | 39 + .../{v1alpha3 => v1}/devicesubrequest.go | 27 +- .../resource/v1/devicetaint.go | 71 + .../{v1alpha3 => v1}/devicetoleration.go | 18 +- .../exactdevicerequest.go} | 70 +- .../{v1alpha3 => v1}/networkdevicedata.go | 2 +- .../opaquedeviceconfiguration.go | 2 +- .../{v1alpha3 => v1}/resourceclaim.go | 53 +- .../resourceclaimconsumerreference.go | 2 +- .../{v1alpha3 => v1}/resourceclaimspec.go | 2 +- .../{v1alpha3 => v1}/resourceclaimstatus.go | 2 +- .../{v1alpha3 => v1}/resourceclaimtemplate.go | 51 +- .../resourceclaimtemplatespec.go | 24 +- .../resource/{v1alpha3 => v1}/resourcepool.go | 2 +- .../{v1alpha3 => v1}/resourceslice.go | 51 +- .../{v1alpha3 => v1}/resourceslicespec.go | 22 +- .../resource/v1alpha3/basicdevice.go | 121 - .../resource/v1alpha3/device.go | 48 - 
.../resource/v1alpha3/devicetaintrule.go | 17 + .../resource/v1beta1/allocateddevicestatus.go | 9 + .../resource/v1beta1/allocationresult.go | 14 +- .../resource/v1beta1/basicdevice.go | 54 +- .../resource/v1beta1/capacityrequestpolicy.go | 63 + .../v1beta1/capacityrequestpolicyrange.go | 61 + .../resource/v1beta1/capacityrequirements.go | 50 + .../resource/v1beta1/devicecapacity.go | 11 +- .../resource/v1beta1/deviceclass.go | 17 + .../resource/v1beta1/deviceclassspec.go | 13 +- .../resource/v1beta1/deviceconstraint.go | 13 +- .../resource/v1beta1/devicerequest.go | 25 +- .../v1beta1/devicerequestallocationresult.go | 64 +- .../resource/v1beta1/devicesubrequest.go | 21 +- .../resource/v1beta1/resourceclaim.go | 17 + .../resource/v1beta1/resourceclaimtemplate.go | 17 + .../v1beta1/resourceclaimtemplatespec.go | 6 + .../resource/v1beta1/resourceslice.go | 17 + .../resource/v1beta2/allocateddevicestatus.go | 9 + .../resource/v1beta2/allocationresult.go | 14 +- .../resource/v1beta2/capacityrequestpolicy.go | 63 + .../v1beta2/capacityrequestpolicyrange.go | 61 + .../resource/v1beta2/capacityrequirements.go | 50 + .../resource/v1beta2/device.go | 56 +- .../resource/v1beta2/devicecapacity.go | 11 +- .../resource/v1beta2/deviceclass.go | 17 + .../resource/v1beta2/deviceclassspec.go | 13 +- .../resource/v1beta2/deviceconstraint.go | 13 +- .../v1beta2/devicerequestallocationresult.go | 64 +- .../resource/v1beta2/devicesubrequest.go | 21 +- .../resource/v1beta2/exactdevicerequest.go | 21 +- .../resource/v1beta2/resourceclaim.go | 17 + .../resource/v1beta2/resourceclaimtemplate.go | 17 + .../v1beta2/resourceclaimtemplatespec.go | 6 + .../resource/v1beta2/resourceslice.go | 17 + .../scheduling/v1/priorityclass.go | 17 + .../scheduling/v1alpha1/priorityclass.go | 17 + .../scheduling/v1beta1/priorityclass.go | 17 + .../storage/v1/csidriver.go | 17 + .../applyconfigurations/storage/v1/csinode.go | 17 + .../storage/v1/csistoragecapacity.go | 17 + .../storage/v1/storageclass.go | 17 + .../storage/v1/volumeattachment.go | 17 + .../storage/v1/volumeattributesclass.go | 285 + .../storage/v1alpha1/csistoragecapacity.go | 17 + .../storage/v1alpha1/volumeattachment.go | 17 + .../storage/v1alpha1/volumeattributesclass.go | 17 + .../storage/v1beta1/csidriver.go | 17 + .../storage/v1beta1/csinode.go | 17 + .../storage/v1beta1/csistoragecapacity.go | 17 + .../storage/v1beta1/storageclass.go | 17 + .../storage/v1beta1/volumeattachment.go | 17 + .../storage/v1beta1/volumeattributesclass.go | 17 + .../v1alpha1/storageversionmigration.go | 17 + e2e/vendor/k8s.io/client-go/dynamic/simple.go | 46 +- .../client-go/features/known_features.go | 31 +- e2e/vendor/k8s.io/client-go/gentype/type.go | 39 - .../v1beta1/interface.go | 14 + .../v1beta1/mutatingadmissionpolicy.go | 101 + .../v1beta1/mutatingadmissionpolicybinding.go | 101 + .../certificates/v1alpha1/interface.go | 7 + .../v1alpha1/podcertificaterequest.go | 102 + .../k8s.io/client-go/informers/generic.go | 34 +- .../informers/networking/interface.go | 8 - .../networking/v1alpha1/interface.go | 52 - .../networking/v1alpha1/ipaddress.go | 101 - .../networking/v1alpha1/servicecidr.go | 101 - .../client-go/informers/resource/interface.go | 8 + .../resource/{v1alpha3 => v1}/deviceclass.go | 34 +- .../informers/resource/v1/interface.go | 66 + .../{v1alpha3 => v1}/resourceclaim.go | 34 +- .../{v1alpha3 => v1}/resourceclaimtemplate.go | 34 +- .../{v1alpha3 => v1}/resourceslice.go | 34 +- .../informers/resource/v1alpha3/interface.go | 28 - .../informers/storage/v1/interface.go 
| 7 + .../storage/v1/volumeattributesclass.go | 101 + .../k8s.io/client-go/kubernetes/clientset.go | 26 +- .../client-go/kubernetes/scheme/register.go | 4 +- .../v1beta1/admissionregistration_client.go | 10 + .../v1beta1/generated_expansion.go | 4 + .../v1beta1/mutatingadmissionpolicy.go | 75 + .../v1beta1/mutatingadmissionpolicybinding.go | 75 + .../v1alpha1/certificates_client.go | 5 + .../v1alpha1/generated_expansion.go | 2 + .../v1alpha1/podcertificaterequest.go | 79 + .../typed/networking/v1alpha1/ipaddress.go | 71 - .../typed/networking/v1alpha1/servicecidr.go | 75 - .../typed/resource/v1/deviceclass.go | 71 + .../v1alpha1 => resource/v1}/doc.go | 2 +- .../v1}/generated_expansion.go | 10 +- .../v1/resource_client.go} | 56 +- .../typed/resource/v1/resourceclaim.go | 75 + .../resource/v1/resourceclaimtemplate.go | 71 + .../typed/resource/v1/resourceslice.go | 71 + .../typed/resource/v1alpha3/deviceclass.go | 71 - .../resource/v1alpha3/generated_expansion.go | 8 - .../resource/v1alpha3/resource_client.go | 20 - .../typed/resource/v1alpha3/resourceclaim.go | 75 - .../v1alpha3/resourceclaimtemplate.go | 73 - .../typed/resource/v1alpha3/resourceslice.go | 71 - .../typed/storage/v1/generated_expansion.go | 2 + .../typed/storage/v1/storage_client.go | 5 + .../typed/storage/v1/volumeattributesclass.go | 71 + .../v1beta1/expansion_generated.go | 8 + .../v1beta1/mutatingadmissionpolicy.go | 48 + .../v1beta1/mutatingadmissionpolicybinding.go | 48 + .../v1alpha1/expansion_generated.go | 8 + .../v1alpha1/podcertificaterequest.go | 70 + .../listers/networking/v1alpha1/ipaddress.go | 48 - .../networking/v1alpha1/servicecidr.go | 48 - .../resource/{v1alpha3 => v1}/deviceclass.go | 12 +- .../resource/v1/expansion_generated.go | 43 + .../{v1alpha3 => v1}/resourceclaim.go | 18 +- .../{v1alpha3 => v1}/resourceclaimtemplate.go | 18 +- .../{v1alpha3 => v1}/resourceslice.go | 12 +- .../resource/v1alpha3/expansion_generated.go | 24 - .../listers/storage/v1/expansion_generated.go | 4 + .../storage/v1/volumeattributesclass.go | 48 + e2e/vendor/k8s.io/client-go/openapi/client.go | 4 + .../k8s.io/client-go/pkg/version/base.go | 8 +- e2e/vendor/k8s.io/client-go/rest/request.go | 211 +- .../k8s.io/client-go/testing/fixture.go | 79 +- .../client-go/tools/cache/controller.go | 16 +- .../client-go/tools/cache/delta_fifo.go | 5 + .../k8s.io/client-go/tools/cache/listers.go | 8 + .../k8s.io/client-go/tools/cache/listwatch.go | 4 + .../k8s.io/client-go/tools/cache/reflector.go | 109 +- .../client-go/tools/cache/shared_informer.go | 1 - .../k8s.io/client-go/tools/cache/store.go | 37 +- .../client-go/tools/cache/the_real_fifo.go | 10 +- .../client-go/tools/clientcmd/api/types.go | 17 +- .../client-go/tools/clientcmd/api/v1/types.go | 7 +- .../client-go/tools/remotecommand/fallback.go | 2 +- .../tools/remotecommand/remotecommand.go | 2 +- .../client-go/tools/remotecommand/spdy.go | 6 +- .../client-go/tools/remotecommand/v1.go | 7 +- .../client-go/tools/remotecommand/v2.go | 7 +- .../client-go/tools/remotecommand/v3.go | 7 +- .../client-go/tools/remotecommand/v4.go | 7 +- .../client-go/tools/remotecommand/v5.go | 4 +- .../tools/remotecommand/websocket.go | 26 +- .../k8s.io/client-go/tools/watch/until.go | 2 +- .../data_consistency_detector.go | 22 +- .../list_data_consistency_detector.go | 76 - .../watch_list_data_consistency_detector.go | 54 - .../k8s.io/client-go/util/retry/util.go | 2 +- .../client-go/util/watchlist/watch_list.go | 82 - .../k8s.io/client-go/util/workqueue/doc.go | 1 + 
.../k8s.io/client-go/util/workqueue/queue.go | 73 +- e2e/vendor/k8s.io/kubelet/pkg/apis/OWNERS | 10 + .../check_hostProbesAndhostLifecycle.go | 165 + e2e/vendor/k8s.io/utils/pointer/OWNERS | 10 - e2e/vendor/k8s.io/utils/pointer/README.md | 3 - e2e/vendor/k8s.io/utils/pointer/pointer.go | 249 - e2e/vendor/modules.txt | 50 +- .../structured-merge-diff/v4/LICENSE | 201 - .../structured-merge-diff/v4/fieldpath/doc.go | 21 - .../v4/fieldpath/element.go | 317 - .../v4/fieldpath/fromvalue.go | 134 - .../v4/fieldpath/managers.go | 144 - .../v4/fieldpath/path.go | 118 - .../v4/fieldpath/pathelementmap.go | 114 - .../v4/fieldpath/serialize-pe.go | 168 - .../v4/fieldpath/serialize.go | 238 - .../structured-merge-diff/v4/fieldpath/set.go | 782 - .../structured-merge-diff/v4/schema/doc.go | 28 - .../v4/schema/elements.go | 375 - .../structured-merge-diff/v4/schema/equals.go | 202 - .../v4/schema/schemaschema.go | 165 - .../structured-merge-diff/v4/typed/compare.go | 470 - .../structured-merge-diff/v4/typed/doc.go | 18 - .../structured-merge-diff/v4/typed/helpers.go | 259 - .../structured-merge-diff/v4/typed/merge.go | 427 - .../structured-merge-diff/v4/typed/parser.go | 151 - .../v4/typed/reconcile_schema.go | 290 - .../structured-merge-diff/v4/typed/remove.go | 165 - .../v4/typed/tofieldset.go | 190 - .../structured-merge-diff/v4/typed/typed.go | 294 - .../v4/typed/validate.go | 205 - .../v4/value/allocator.go | 203 - .../structured-merge-diff/v4/value/doc.go | 21 - .../structured-merge-diff/v4/value/fields.go | 97 - .../v4/value/jsontagutil.go | 91 - .../structured-merge-diff/v4/value/list.go | 139 - .../v4/value/listreflect.go | 98 - .../v4/value/listunstructured.go | 74 - .../structured-merge-diff/v4/value/map.go | 270 - .../v4/value/mapreflect.go | 209 - .../v4/value/mapunstructured.go | 190 - .../v4/value/reflectcache.go | 484 - .../structured-merge-diff/v4/value/scalar.go | 50 - .../v4/value/structreflect.go | 208 - .../structured-merge-diff/v4/value/value.go | 347 - .../v4/value/valuereflect.go | 294 - .../v4/value/valueunstructured.go | 178 - .../sigs.k8s.io/yaml/goyaml.v2/README.md | 71 - .../yaml/goyaml.v2/yaml_aliases.go | 85 - 481 files changed, 46406 insertions(+), 34160 deletions(-) create mode 100644 e2e/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go delete mode 100644 e2e/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go delete mode 100644 e2e/vendor/k8s.io/api/networking/v1alpha1/generated.proto delete mode 100644 e2e/vendor/k8s.io/api/networking/v1alpha1/types.go delete mode 100644 e2e/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go delete mode 100644 e2e/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go delete mode 100644 e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go delete mode 100644 e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go rename e2e/vendor/k8s.io/{client-go/listers/networking/v1alpha1/expansion_generated.go => api/resource/v1/devicetaint.go} (50%) rename e2e/vendor/k8s.io/api/{networking/v1alpha1 => resource/v1}/doc.go (83%) create mode 100644 e2e/vendor/k8s.io/api/resource/v1/generated.pb.go create mode 100644 e2e/vendor/k8s.io/api/resource/v1/generated.proto rename e2e/vendor/k8s.io/api/{networking/v1alpha1 => resource/v1}/register.go (54%) create mode 100644 e2e/vendor/k8s.io/api/resource/v1/types.go create mode 100644 e2e/vendor/k8s.io/api/resource/v1/types_swagger_doc_generated.go create mode 100644 e2e/vendor/k8s.io/api/resource/v1/zz_generated.deepcopy.go create 
mode 100644 e2e/vendor/k8s.io/api/resource/v1/zz_generated.prerelease-lifecycle.go create mode 100644 e2e/vendor/k8s.io/api/resource/v1alpha3/devicetaint.go delete mode 100644 e2e/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/applyconfiguration.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/jsonpatch.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicy.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybindingspec.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicyspec.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutation.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/{networking/v1alpha1/servicecidr.go => certificates/v1alpha1/podcertificaterequest.go} (62%) create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequestspec.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequeststatus.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerextendedresourcerequest.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartrule.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartruleonexitcodes.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/core/v1/filekeyselector.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcertificateprojection.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/core/v1/podextendedresourceclaimstatus.go delete mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go delete mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go delete mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go delete mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go delete mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/allocateddevicestatus.go (85%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/allocationresult.go (66%) create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicy.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicyrange.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequirements.go create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/celdeviceselector.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/counter.go (98%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/counterset.go (99%) create mode 100644 
e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/device.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceallocationconfiguration.go (89%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceallocationresult.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceattribute.go (99%) create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecapacity.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclaim.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclaimconfiguration.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclass.go (86%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclassconfiguration.go (98%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclassspec.go (73%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceconfiguration.go (98%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceconstraint.go (69%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicecounterconsumption.go (99%) create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequest.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicerequestallocationresult.go (52%) create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceselector.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicesubrequest.go (76%) create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetaint.go rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicetoleration.go (82%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3/devicerequest.go => v1/exactdevicerequest.go} (52%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/networkdevicedata.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/opaquedeviceconfiguration.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaim.go (86%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimconsumerreference.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimspec.go (98%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimstatus.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimtemplate.go (86%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimtemplatespec.go (91%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourcepool.go (99%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceslice.go (86%) rename e2e/vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceslicespec.go (84%) delete mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go delete mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go create mode 100644 
e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicy.go
 create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicyrange.go
 create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequirements.go
 create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicy.go
 create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicyrange.go
 create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequirements.go
 create mode 100644 e2e/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattributesclass.go
 create mode 100644 e2e/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicy.go
 create mode 100644 e2e/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
 create mode 100644 e2e/vendor/k8s.io/client-go/informers/certificates/v1alpha1/podcertificaterequest.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go
 rename e2e/vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/deviceclass.go (71%)
 create mode 100644 e2e/vendor/k8s.io/client-go/informers/resource/v1/interface.go
 rename e2e/vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/resourceclaim.go (71%)
 rename e2e/vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/resourceclaimtemplate.go (70%)
 rename e2e/vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/resourceslice.go (71%)
 create mode 100644 e2e/vendor/k8s.io/client-go/informers/storage/v1/volumeattributesclass.go
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingadmissionpolicy.go
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/podcertificaterequest.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1/deviceclass.go
 rename e2e/vendor/k8s.io/client-go/kubernetes/typed/{networking/v1alpha1 => resource/v1}/doc.go (97%)
 rename e2e/vendor/k8s.io/client-go/kubernetes/typed/{networking/v1alpha1 => resource/v1}/generated_expansion.go (77%)
 rename e2e/vendor/k8s.io/client-go/kubernetes/typed/{networking/v1alpha1/networking_client.go => resource/v1/resource_client.go} (56%)
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1/resourceclaim.go
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1/resourceclaimtemplate.go
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1/resourceslice.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go
 create mode 100644 e2e/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattributesclass.go
 create mode 100644 e2e/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingadmissionpolicy.go
 create mode 100644 e2e/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
 create mode 100644 e2e/vendor/k8s.io/client-go/listers/certificates/v1alpha1/podcertificaterequest.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go
 rename e2e/vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/deviceclass.go (78%)
 create mode 100644 e2e/vendor/k8s.io/client-go/listers/resource/v1/expansion_generated.go
 rename e2e/vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/resourceclaim.go (79%)
 rename e2e/vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/resourceclaimtemplate.go (78%)
 rename e2e/vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/resourceslice.go (77%)
 create mode 100644 e2e/vendor/k8s.io/client-go/listers/storage/v1/volumeattributesclass.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go
 delete mode 100644 e2e/vendor/k8s.io/client-go/util/watchlist/watch_list.go
 create mode 100644 e2e/vendor/k8s.io/kubelet/pkg/apis/OWNERS
 create mode 100644 e2e/vendor/k8s.io/pod-security-admission/policy/check_hostProbesAndhostLifecycle.go
 delete mode 100644 e2e/vendor/k8s.io/utils/pointer/OWNERS
 delete mode 100644 e2e/vendor/k8s.io/utils/pointer/README.md
 delete mode 100644 e2e/vendor/k8s.io/utils/pointer/pointer.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/doc.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/element.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/fromvalue.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/managers.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/path.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize-pe.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/serialize.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/doc.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/doc.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/doc.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go
 delete mode 100644 e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
 delete mode 100644 e2e/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go

diff --git a/e2e/go.mod b/e2e/go.mod
index 9e7ed10a6f1..a1179fa0066 100644
--- a/e2e/go.mod
+++ b/e2e/go.mod
@@ -21,19 +21,19 @@ require (
 	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/cloud-provider v0.34.1
 	k8s.io/kubernetes v1.34.1
-	k8s.io/pod-security-admission v0.33.4
+	k8s.io/pod-security-admission v0.34.1
 )
 
 replace (
-	k8s.io/api => k8s.io/api v0.33.2
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.2
-	k8s.io/client-go => k8s.io/client-go v0.33.2
-	k8s.io/cri-client => k8s.io/cri-client v0.33.2
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.2
-	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.2
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.2
-	k8s.io/kubectl => k8s.io/kubectl v0.33.2
-	k8s.io/kubelet => k8s.io/kubelet v0.33.2
+	k8s.io/api => k8s.io/api v0.34.1
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.34.1
+	k8s.io/client-go => k8s.io/client-go v0.34.1
+	k8s.io/cri-client => k8s.io/cri-client v0.34.1
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.34.1
+	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.34.1
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.34.1
+	k8s.io/kubectl => k8s.io/kubectl v0.34.1
+	k8s.io/kubelet => k8s.io/kubelet v0.34.1
 )
 
 require (
@@ -129,7 +129,6 @@ require (
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
 	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
 	sigs.k8s.io/yaml v1.6.0 // indirect
 )

diff --git a/e2e/go.sum b/e2e/go.sum
index fed88a7fd23..3b4823f13c9 100644
--- a/e2e/go.sum
+++ b/e2e/go.sum
@@ -58,7 +58,6 @@ github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
 github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
 github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
 github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -264,16 +263,16 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY=
-k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs=
-k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8=
-k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8=
+k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
+k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
+k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
+k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
 k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
 k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
 k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA=
 k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0=
-k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E=
-k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo=
+k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
+k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
 k8s.io/cloud-provider v0.34.1 h1:FS+4C1vq9pIngd/5LR5Jha1sEbn+fo0HJitgZmUyBNc=
 k8s.io/cloud-provider v0.34.1/go.mod h1:ghyQYfQIWZAXKNS+TEgEiQ8wPuhzIVt3wFO6rKqS/rQ=
 k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A=
@@ -282,35 +281,31 @@ k8s.io/component-helpers v0.34.1 h1:gWhH3CCdwAx5P3oJqZKb4Lg5FYZTWVbdWtOI8n9U4XY=
 k8s.io/component-helpers v0.34.1/go.mod h1:4VgnUH7UA/shuBur+OWoQC0xfb69sy/93ss0ybZqm3c=
 k8s.io/controller-manager v0.34.1 h1:c9Cmun/zF740kmdRQWPGga+4MglT5SlrwsCXDS/KtJI=
 k8s.io/controller-manager v0.34.1/go.mod h1:fGiJDhi3OSzSAB4f40ZkJLAqMQSag9RM+7m5BRhBO3Q=
-k8s.io/csi-translation-lib v0.33.2 h1:QyWkVcf0rbNjc53uAqCyl9kmHCRn1O0Z4QT69y/jwHQ=
-k8s.io/csi-translation-lib v0.33.2/go.mod h1:nFPX6BA20EDdIQpitb6p2wVtvLBuXsmm6D1Cwi3rDnE=
+k8s.io/csi-translation-lib v0.34.1 h1:8+QMIWBwPGFsqWw9eAvimA2GaHXGgLLYT61I1NzDnXw=
+k8s.io/csi-translation-lib v0.34.1/go.mod h1:QXytPJ1KzYQaiMgVm82ANG+RGAUf276m8l9gFT+R6Xg=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
 k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
-k8s.io/kubectl v0.33.2 h1:7XKZ6DYCklu5MZQzJe+CkCjoGZwD1wWl7t/FxzhMz7Y=
-k8s.io/kubectl v0.33.2/go.mod h1:8rC67FB8tVTYraovAGNi/idWIK90z2CHFNMmGJZJ3KI=
-k8s.io/kubelet v0.33.2 h1:wxEau5/563oJb3j3KfrCKlNWWx35YlSgDLOYUBCQ0pg=
-k8s.io/kubelet v0.33.2/go.mod h1:way8VCDTUMiX1HTOvJv7M3xS/xNysJI6qh7TOqMe5KM=
+k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI=
+k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A=
+k8s.io/kubelet v0.34.1 h1:doAaTA9/Yfzbdq/u/LveZeONp96CwX9giW6b+oHn4m4=
+k8s.io/kubelet v0.34.1/go.mod h1:PtV3Ese8iOM19gSooFoQT9iyRisbmJdAPuDImuccbbA=
 k8s.io/kubernetes v1.34.1 h1:F3p8dtpv+i8zQoebZeK5zBqM1g9x1aIdnA5vthvcuUk=
 k8s.io/kubernetes v1.34.1/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
 k8s.io/mount-utils v0.34.1 h1:zMBEFav8Rxwm54S8srzy5FxAc4KQ3X4ZcjnqTCzHmZk=
 k8s.io/mount-utils v0.34.1/go.mod h1:MIjjYlqJ0ziYQg0MO09kc9S96GIcMkhF/ay9MncF0GA=
-k8s.io/pod-security-admission v0.33.4 h1:adSwY7a/Q4Eoj+uCUfav90xRe6mB8waF0HAZ4gZeWD0=
-k8s.io/pod-security-admission v0.33.4/go.mod h1:K+4JaqBz5yqE7TXf3g5zEiBLcu9RdW2BjTWydFEror4=
+k8s.io/pod-security-admission v0.34.1 h1:XsP5eh8qCj69hK0a5TBMU4Ed7Ckn8JEmmbk/iepj+XM=
+k8s.io/pod-security-admission v0.34.1/go.mod h1:87yY36Gxc8Hjx24FxqAD5zMY4k0tP0u7Mu/XuwXEbmg=
 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
-sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
 sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
 sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
 sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
 sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

diff --git a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
index 261ae41bd03..bf1ae594882 100644
--- a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
+++ b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
@@ -25,6 +25,7 @@ import (
 	io "io"
 
 	proto "github.com/gogo/protobuf/proto"
+	k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1"
 	v11 "k8s.io/api/admissionregistration/v1"
 	k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,10 +47,38 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
+func (m *ApplyConfiguration) Reset()      { *m = ApplyConfiguration{} }
+func (*ApplyConfiguration) ProtoMessage() {}
+func (*ApplyConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{0}
+}
+func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ApplyConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyConfiguration.Merge(m, src)
+}
+func (m *ApplyConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *ApplyConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo
+
 func (m *AuditAnnotation) Reset()      { *m = AuditAnnotation{} }
 func (*AuditAnnotation) ProtoMessage() {}
 func (*AuditAnnotation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{0}
+	return fileDescriptor_7f7c65a4f012fb19, []int{1}
 }
 func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -77,7 +106,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
 func (m *ExpressionWarning) Reset()      { *m = ExpressionWarning{} }
 func (*ExpressionWarning) ProtoMessage() {}
 func (*ExpressionWarning) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{1}
+	return fileDescriptor_7f7c65a4f012fb19, []int{2}
 }
 func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -102,10 +131,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
 
+func (m *JSONPatch) Reset()      { *m = JSONPatch{} }
+func (*JSONPatch) ProtoMessage() {}
+func (*JSONPatch) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{3}
+}
+func (m *JSONPatch) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *JSONPatch) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_JSONPatch.Merge(m, src)
+}
+func (m *JSONPatch) XXX_Size() int {
+	return m.Size()
+}
+func (m *JSONPatch) XXX_DiscardUnknown() {
+	xxx_messageInfo_JSONPatch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JSONPatch proto.InternalMessageInfo
+
 func (m *MatchCondition) Reset()      { *m = MatchCondition{} }
 func (*MatchCondition) ProtoMessage() {}
 func (*MatchCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{2}
+	return fileDescriptor_7f7c65a4f012fb19, []int{4}
 }
 func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -133,7 +190,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
 func (m *MatchResources) Reset()      { *m = MatchResources{} }
 func (*MatchResources) ProtoMessage() {}
 func (*MatchResources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{3}
+	return fileDescriptor_7f7c65a4f012fb19, []int{5}
 }
 func (m *MatchResources) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -158,10 +215,178 @@ func (m *MatchResources) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_MatchResources proto.InternalMessageInfo
 
+func (m *MutatingAdmissionPolicy) Reset()      { *m = MutatingAdmissionPolicy{} }
+func (*MutatingAdmissionPolicy) ProtoMessage() {}
+func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{6}
+}
+func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicy) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBinding) Reset()      { *m = MutatingAdmissionPolicyBinding{} }
+func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{7}
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingList) Reset()      { *m = MutatingAdmissionPolicyBindingList{} }
+func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{8}
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingSpec) Reset()      { *m = MutatingAdmissionPolicyBindingSpec{} }
+func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{9}
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyList) Reset()      { *m = MutatingAdmissionPolicyList{} }
+func (*MutatingAdmissionPolicyList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{10}
+}
+func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicySpec) Reset()      { *m = MutatingAdmissionPolicySpec{} }
+func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{11}
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo
+
 func (m *MutatingWebhook) Reset()      { *m = MutatingWebhook{} }
 func (*MutatingWebhook) ProtoMessage() {}
 func (*MutatingWebhook) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{4}
+	return fileDescriptor_7f7c65a4f012fb19, []int{12}
 }
 func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -189,7 +414,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
 func (m *MutatingWebhookConfiguration) Reset()      { *m = MutatingWebhookConfiguration{} }
 func (*MutatingWebhookConfiguration) ProtoMessage() {}
 func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{5}
+	return fileDescriptor_7f7c65a4f012fb19, []int{13}
 }
 func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -217,7 +442,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
 func (m *MutatingWebhookConfigurationList) Reset()      { *m = MutatingWebhookConfigurationList{} }
 func (*MutatingWebhookConfigurationList) ProtoMessage() {}
 func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{6}
+	return fileDescriptor_7f7c65a4f012fb19, []int{14}
 }
 func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -242,10 +467,38 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
 
+func (m *Mutation) Reset()      { *m = Mutation{} }
+func (*Mutation) ProtoMessage() {}
+func (*Mutation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{15}
+}
+func (m *Mutation) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Mutation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Mutation.Merge(m, src)
+}
+func (m *Mutation) XXX_Size() int {
+	return m.Size()
+}
+func (m *Mutation) XXX_DiscardUnknown() {
+	xxx_messageInfo_Mutation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Mutation proto.InternalMessageInfo
+
 func (m *NamedRuleWithOperations) Reset()      { *m = NamedRuleWithOperations{} }
 func (*NamedRuleWithOperations) ProtoMessage() {}
 func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{7}
+	return fileDescriptor_7f7c65a4f012fb19, []int{16}
 }
 func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -273,7 +526,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
 func (m *ParamKind) Reset()      { *m = ParamKind{} }
 func (*ParamKind) ProtoMessage() {}
 func (*ParamKind) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{8}
+	return fileDescriptor_7f7c65a4f012fb19, []int{17}
 }
 func (m *ParamKind) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -301,7 +554,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo
 func (m *ParamRef) Reset()      { *m = ParamRef{} }
 func (*ParamRef) ProtoMessage() {}
 func (*ParamRef) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{9}
+	return fileDescriptor_7f7c65a4f012fb19, []int{18}
 }
 func (m *ParamRef) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -329,7 +582,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo
 func (m *ServiceReference) Reset()      { *m = ServiceReference{} }
 func (*ServiceReference) ProtoMessage() {}
 func (*ServiceReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{10}
+	return fileDescriptor_7f7c65a4f012fb19, []int{19}
 }
 func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -357,7 +610,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
 func (m *TypeChecking) Reset()      { *m = TypeChecking{} }
 func (*TypeChecking) ProtoMessage() {}
 func (*TypeChecking) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{11}
+	return fileDescriptor_7f7c65a4f012fb19, []int{20}
 }
 func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -385,7 +638,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicy) Reset()      { *m = ValidatingAdmissionPolicy{} }
 func (*ValidatingAdmissionPolicy) ProtoMessage() {}
 func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{12}
+	return fileDescriptor_7f7c65a4f012fb19, []int{21}
 }
 func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -413,7 +666,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicyBinding) Reset()      { *m = ValidatingAdmissionPolicyBinding{} }
 func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{13}
+	return fileDescriptor_7f7c65a4f012fb19, []int{22}
 }
 func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -441,7 +694,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicyBindingList) Reset()      { *m = ValidatingAdmissionPolicyBindingList{} }
 func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{14}
+	return fileDescriptor_7f7c65a4f012fb19, []int{23}
 }
 func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -469,7 +722,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn
 func (m *ValidatingAdmissionPolicyBindingSpec) Reset()      { *m = ValidatingAdmissionPolicyBindingSpec{} }
 func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{15}
+	return fileDescriptor_7f7c65a4f012fb19, []int{24}
 }
 func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -497,7 +750,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn
 func (m *ValidatingAdmissionPolicyList) Reset()      { *m = ValidatingAdmissionPolicyList{} }
 func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{16}
+	return fileDescriptor_7f7c65a4f012fb19, []int{25}
 }
 func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -525,7 +778,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicySpec) Reset()      { *m = ValidatingAdmissionPolicySpec{} }
 func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
 func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{17}
+	return fileDescriptor_7f7c65a4f012fb19, []int{26}
 }
 func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -553,7 +806,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicyStatus) Reset()      { *m = ValidatingAdmissionPolicyStatus{} }
 func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{18}
+	return fileDescriptor_7f7c65a4f012fb19, []int{27}
 }
 func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -581,7 +834,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
 func (m *ValidatingWebhook) Reset()      { *m = ValidatingWebhook{} }
 func (*ValidatingWebhook) ProtoMessage() {}
 func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{19}
+	return fileDescriptor_7f7c65a4f012fb19, []int{28}
 }
 func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -609,7 +862,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
 func (m *ValidatingWebhookConfiguration) Reset()      { *m = ValidatingWebhookConfiguration{} }
 func (*ValidatingWebhookConfiguration) ProtoMessage() {}
 func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{20}
+	return fileDescriptor_7f7c65a4f012fb19, []int{29}
 }
 func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -637,7 +890,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
 func (m *ValidatingWebhookConfigurationList) Reset()      { *m = ValidatingWebhookConfigurationList{} }
 func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
 func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{21}
+	return fileDescriptor_7f7c65a4f012fb19, []int{30}
 }
 func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -665,7 +918,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
 func (m *Validation) Reset()      { *m = Validation{} }
 func (*Validation) ProtoMessage() {}
 func (*Validation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{22}
+	return fileDescriptor_7f7c65a4f012fb19, []int{31}
 }
 func (m *Validation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -693,7 +946,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo
 func (m *Variable) Reset()      { *m = Variable{} }
 func (*Variable) ProtoMessage() {}
 func (*Variable) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{23}
+	return fileDescriptor_7f7c65a4f012fb19, []int{32}
 }
 func (m *Variable) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -721,7 +974,7 @@ var xxx_messageInfo_Variable proto.InternalMessageInfo
 func (m *WebhookClientConfig) Reset()      { *m = WebhookClientConfig{} }
 func (*WebhookClientConfig) ProtoMessage() {}
 func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{24}
+	return fileDescriptor_7f7c65a4f012fb19, []int{33}
 }
 func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -747,13 +1000,22 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
 
 func init() {
+	proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ApplyConfiguration")
 	proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation")
 	proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning")
+	proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1beta1.JSONPatch")
 	proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition")
 	proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources")
+	proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicy")
+	proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding")
+	proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingList")
+	proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingSpec")
+	proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyList")
+	proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicySpec")
 	proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook")
 	proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration")
 	proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList")
+	proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1beta1.Mutation")
 	proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations")
 	proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind")
 	proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef")
@@ -779,130 +1041,174 @@ func init() {
 }
 
 var fileDescriptor_7f7c65a4f012fb19 = []byte{
-	// 1957 bytes of a gzipped FileDescriptorProto
-	// [gzipped descriptor byte array elided]
+	// 2215 bytes of a gzipped FileDescriptorProto
+	// [gzipped descriptor byte array elided]
+}
+
+func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ApplyConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
 }
 
 func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
@@ -971,6 +1277,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *JSONPatch) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *MatchCondition) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -1086,7 +1420,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
-func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1096,112 +1430,18 @@ func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.MatchConditions) > 0 {
-		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x62
-		}
-	}
-	if m.ObjectSelector != nil {
-		{
-			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x5a
-	}
-	if m.ReinvocationPolicy != nil {
-		i -= len(*m.ReinvocationPolicy)
-		copy(dAtA[i:], *m.ReinvocationPolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy)))
-		i--
-		dAtA[i] = 0x52
-	}
-	if m.MatchPolicy != nil {
-		i -= len(*m.MatchPolicy)
-		copy(dAtA[i:], *m.MatchPolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
-		i--
-		dAtA[i] = 0x4a
-	}
-	if len(m.AdmissionReviewVersions) > 0 {
-		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.AdmissionReviewVersions[iNdEx])
-			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
-			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
-			i--
-			dAtA[i] = 0x42
-		}
-	}
-	if m.TimeoutSeconds != nil {
-		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
-		i--
-		dAtA[i] = 0x38
-	}
-	if m.SideEffects != nil {
-		i -= len(*m.SideEffects)
-		copy(dAtA[i:], *m.SideEffects)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
-		i--
-		dAtA[i] = 0x32
-	}
-	if m.NamespaceSelector != nil {
-		{
-			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.FailurePolicy != nil {
-		i -= len(*m.FailurePolicy)
-		copy(dAtA[i:], *m.FailurePolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
-		i--
-		dAtA[i] = 0x22
-	}
-	if len(m.Rules)
> 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1210,15 +1450,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1228,30 +1473,26 @@ func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1265,7 +1506,7 @@ func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1275,12 +1516,12 @@ func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1312,7 +1553,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1322,39 +1563,49 @@ func (m 
*NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - i-- - dAtA[i] = 0x12 - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ParamKind) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1364,30 +1615,44 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ParamRef) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1397,26 +1662,73 @@ func (m *ParamRef) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ParameterNotFoundAction != nil { - i -= len(*m.ParameterNotFoundAction) - copy(dAtA[i:], *m.ParameterNotFoundAction) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i -= len(m.ReinvocationPolicy) + copy(dAtA[i:], m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x3a + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a } - if m.Selector != nil { + if len(m.Mutations) > 0 { + for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1424,67 +1736,24 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Port != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - i-- - dAtA[i] = 0x20 + dAtA[i] = 0x12 } - if m.Path != nil { - i -= len(*m.Path) - copy(dAtA[i:], *m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *TypeChecking) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1494,20 +1763,20 @@ func (m *TypeChecking) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ExpressionWarnings) > 0 { - for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1515,54 +1784,91 @@ func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x62 } } - return len(dAtA) - i, nil -} - -func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - return dAtA[:n], nil -} - -func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1570,11 +1876,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1584,26 +1895,30 @@ func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1617,7 +1932,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1627,12 +1942,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1664,7 +1979,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, 
nil } -func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { +func (m *Mutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1674,28 +1989,19 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *Mutation) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ValidationActions) > 0 { - for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ValidationActions[iNdEx]) - copy(dAtA[i:], m.ValidationActions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.MatchResources != nil { + if m.JSONPatch != nil { { - size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1703,11 +2009,11 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } - if m.ParamRef != nil { + if m.ApplyConfiguration != nil { { - size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1715,17 +2021,17 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - i -= len(m.PolicyName) - copy(dAtA[i:], m.PolicyName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i -= len(m.PatchType) + copy(dAtA[i:], m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1735,32 +2041,18 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1768,11 +2060,20 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ParamKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1782,94 +2083,59 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Variables) > 0 { - for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.MatchConditions) > 0 { - for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.AuditAnnotations) > 0 { - for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ParamRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + return dAtA[:n], nil +} + +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) i-- dAtA[i] = 0x22 } - if len(m.Validations) > 0 { - for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.MatchConstraints != nil { - { - size, err := 
m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ParamKind != nil { + if m.Selector != nil { { - size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1877,12 +2143,22 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1892,49 +2168,42 @@ func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 } - if m.TypeChecking != nil { - { - size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1944,20 +2213,20 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.MatchConditions) > 0 { - for iNdEx := len(m.MatchConditions) - 
1; iNdEx >= 0; iNdEx-- { + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1965,84 +2234,44 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x5a - } - } - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x52 } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i-- - dAtA[i] = 0x4a + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 - } - } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2051,15 +2280,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func 
(m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2069,30 +2303,26 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 { size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2106,7 +2336,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2116,12 +2346,12 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2153,7 +2383,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *Validation) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2163,42 +2393,58 @@ func (m *Validation) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validation) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.MessageExpression) - copy(dAtA[i:], m.MessageExpression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) - i-- - dAtA[i] = 0x22 - if m.Reason != nil { - i -= len(*m.Reason) - copy(dAtA[i:], *m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + if len(m.ValidationActions) > 0 { + for iNdEx := 
len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x1a } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Variable) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2208,30 +2454,44 @@ func (m *Variable) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Variable) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2241,335 +2501,636 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0x1a - } - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], 
m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i-- - dAtA[i] = 0x12 - } - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ } - dAtA[offset] = uint8(v) - return base -} -func (m *AuditAnnotation) Size() (n int) { - if m == nil { - return 0 + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ValueExpression) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ExpressionWarning) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.FieldRef) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Warning) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *MatchCondition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MatchResources) Size() (n int) 
{ - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ResourceRules) > 0 { - for _, e := range m.ResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.ExcludeResourceRules) > 0 { - for _, e := range m.ExcludeResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *MutatingWebhook) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], 
m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 } } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 } - if len(m.MatchConditions) > 0 { - for _, e := range m.MatchConditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - return n -} - -func (m *MutatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - return n -} - -func (m *MutatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = m.RuleWithOperations.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ParamKind) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamRef) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingWebhookConfiguration) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.ParameterNotFoundAction != nil { - l = len(*m.ParameterNotFoundAction) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *TypeChecking) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.ExpressionWarnings) > 0 { - for _, e := range m.ExpressionWarnings { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicy) Size() (n int) { - if m == nil { - return 0 +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ApplyConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JSONPatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { +func (m *MutatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MutatingAdmissionPolicyBindingList) Size() (n int) { if m == nil { return 0 } @@ -2586,7 +3147,7 @@ func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { return n } -func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { +func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) { if m == nil { return 0 } @@ -2602,16 +3163,10 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { l = m.MatchResources.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.ValidationActions) > 0 { - for _, s := range m.ValidationActions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *ValidatingAdmissionPolicyList) Size() (n int) { +func (m *MutatingAdmissionPolicyList) Size() (n int) { if m == nil { return 0 } @@ -2628,7 +3183,7 @@ func (m *ValidatingAdmissionPolicyList) Size() (n int) { return n } -func (m *ValidatingAdmissionPolicySpec) Size() (n int) { +func (m *MutatingAdmissionPolicySpec) Size() (n int) { if m == nil { return 0 } @@ -2642,8 +3197,14 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) { l = m.MatchConstraints.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Validations) > 0 { - for _, e := range m.Validations { + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Mutations) > 0 { + for _, e := range m.Mutations { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -2652,48 +3213,18 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) { l = len(*m.FailurePolicy) n += 1 + l + sovGenerated(uint64(l)) } - if len(m.AuditAnnotations) > 0 { - for _, e := range m.AuditAnnotations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } if len(m.MatchConditions) > 0 { for _, e := range m.MatchConditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Variables) > 0 { - for _, e := range m.Variables { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if m.TypeChecking != nil { - l = m.TypeChecking.Size() - 
n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + l = len(m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ValidatingWebhook) Size() (n int) { +func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 } @@ -2734,6 +3265,10 @@ func (m *ValidatingWebhook) Size() (n int) { l = len(*m.MatchPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } if m.ObjectSelector != nil { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -2747,7 +3282,7 @@ func (m *ValidatingWebhook) Size() (n int) { return n } -func (m *ValidatingWebhookConfiguration) Size() (n int) { +func (m *MutatingWebhookConfiguration) Size() (n int) { if m == nil { return 0 } @@ -2764,7 +3299,7 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) { return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { +func (m *MutatingWebhookConfigurationList) Size() (n int) { if m == nil { return 0 } @@ -2781,476 +3316,1911 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) { return n } -func (m *Validation) Size() (n int) { +func (m *Mutation) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = len(m.PatchType) n += 1 + l + sovGenerated(uint64(l)) - if m.Reason != nil { - l = len(*m.Reason) + if m.ApplyConfiguration != nil { + l = m.ApplyConfiguration.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.MessageExpression) + if m.JSONPatch != nil { + l = m.JSONPatch.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *Variable) Size() (n int) { +func (m *ParamKind) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = len(m.APIVersion) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) + l = len(m.Kind) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *WebhookClientConfig) Size() (n int) { +func (m *ParamRef) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Service != nil { - l = m.Service.Size() + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.CABundle != nil { - l = len(m.CABundle) + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) n += 1 + l + sovGenerated(uint64(l)) } - if m.URL != nil { - l = len(*m.URL) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *AuditAnnotation) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AuditAnnotation{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ExpressionWarning) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ExpressionWarning{`, - `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, - `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *MatchCondition) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&MatchCondition{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *MatchResources) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ResourceRules { - repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForResourceRules += "}" - repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ExcludeResourceRules { - repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExcludeResourceRules += "}" - s := strings.Join([]string{`&MatchResources{`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ResourceRules:` + repeatedStringForResourceRules + `,`, - `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `}`, - }, "") - 
return s + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForRules += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForMatchConditions += "}" - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `}`, - }, "") - return s + return n } -func (this *MutatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForWebhooks := "[]MutatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s -} -func (this *MutatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]MutatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := 
strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *NamedRuleWithOperations) String() string { - if this == nil { - return "nil" + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NamedRuleWithOperations{`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ParamKind) String() string { - if this == nil { - return "nil" + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ParamKind{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `}`, - }, "") - return s -} -func (this *ParamRef) String() string { - if this == nil { - return "nil" + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ParamRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s + return n } -func (this *TypeChecking) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForExpressionWarnings := "[]ExpressionWarning{" - for _, f := range this.ExpressionWarnings { - repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExpressionWarnings += "}" - s := strings.Join([]string{`&TypeChecking{`, - `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicy) String() string { - if this == nil { - return "nil" + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), 
"ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingAdmissionPolicyBinding) String() string { - if this == nil { - return "nil" + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicyBindingList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicyBindingSpec) String() string { - if this == nil { - return "nil" + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, - `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, - `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, - `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, - `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicyList) String() string { - if this == nil { - return "nil" + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]ValidatingAdmissionPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingAdmissionPolicySpec) String() string { - if this == nil { - return "nil" + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range 
m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForValidations := "[]Validation{" - for _, f := range this.Validations { - repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForValidations += "}" - repeatedStringForAuditAnnotations := "[]AuditAnnotation{" - for _, f := range this.AuditAnnotations { - repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForAuditAnnotations += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForMatchConditions += "}" - repeatedStringForVariables := "[]Variable{" - for _, f := range this.Variables { - repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForVariables += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, - `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, - `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, - `Validations:` + repeatedStringForValidations + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `Variables:` + repeatedStringForVariables + `,`, + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n 
+= 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ApplyConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ApplyConfiguration{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, `}`, }, "") return s } -func (this *ValidatingAdmissionPolicyStatus) String() string { +func (this *AuditAnnotation) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhook) String() string { +func (this *ExpressionWarning) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += fmt.Sprintf("%v", f) + "," + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *JSONPatch) String() string { + if this == nil { + return "nil" } - repeatedStringForRules += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + s := strings.Join([]string{`&JSONPatch{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" } - repeatedStringForMatchConditions += "}" - s := strings.Join([]string{`&ValidatingWebhook{`, + s := strings.Join([]string{`&MatchCondition{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), 
"NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfiguration) String() string { +func (this *MutatingAdmissionPolicy) String() string { if this == nil { return "nil" } - repeatedStringForWebhooks := "[]ValidatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + s := strings.Join([]string{`&MutatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfigurationList) String() string { +func (this *MutatingAdmissionPolicyBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *Validation) String() string { +func (this *MutatingAdmissionPolicyBindingSpec) String() string { if this == nil { return "nil" } 
- s := strings.Join([]string{`&Validation{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + valueToStringGenerated(this.Reason) + `,`, - `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, `}`, }, "") return s } -func (this *Variable) String() string { +func (this *MutatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicySpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Variable{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + repeatedStringForMutations := "[]Mutation{" + for _, f := range this.Mutations { + repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + "," + } + repeatedStringForMutations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `Mutations:` + repeatedStringForMutations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", 
"MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Mutation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mutation{`, + `PatchType:` + fmt.Sprintf("%v", this.PatchType) + `,`, + `ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`, + `JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", 
this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), 
"ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + 
repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + 
`,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONPatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + 
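+ // A protobuf tag packs the field number and wire type into a single
+ // varint: tag = (fieldNum << 3) | wireType. All string and message
+ // fields in this file are length-delimited (wire type 2), which is why
+ // every case begins with a wireType != 2 check. For example, a leading
+ // tag byte of 0x12 decodes to fieldNum=2, wireType=2.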
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s + return nil } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" +func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m 
*AuditAnnotation) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3273,17 +5243,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3293,29 +5263,30 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3325,23 +5296,25 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ValueExpression = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, MutatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3364,7 +5337,7 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3387,15 +5360,15 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d (wire 
type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3423,13 +5396,49 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldRef = string(dAtA[iNdEx:postIndex]) + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3439,23 +5448,27 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Warning = string(dAtA[iNdEx:postIndex]) + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3478,7 +5491,7 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { } return nil } -func (m *MatchCondition) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3501,17 +5514,17 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicyList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3521,29 +5534,30 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error 
{ } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3553,23 +5567,25 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Expression = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, MutatingAdmissionPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3592,7 +5608,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *MatchResources) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3615,15 +5631,15 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3650,16 +5666,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3686,16 +5702,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = 
&v1.LabelSelector{} + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3722,14 +5738,14 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) - if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3756,14 +5772,81 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) - if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Mutations = append(m.Mutations, Mutation{}) + if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3791,8 +5874,7 @@ func (m 
*MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + m.ReinvocationPolicy = k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4160,7 +6242,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + s := k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex]) m.ReinvocationPolicy = &s iNdEx = postIndex case 11: @@ -4488,6 +6570,160 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } +func (m *Mutation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mutation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PatchType = PatchType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ApplyConfiguration == nil { + m.ApplyConfiguration = &ApplyConfiguration{} + } + if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JSONPatch == nil { + m.JSONPatch = &JSONPatch{} + } + if err := 
m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
index 30f99f64d0f..fb47a20056e 100644
--- a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
+++ b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
@@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/admissionregistration/v1beta1";
+// ApplyConfiguration defines the desired configuration values of an object.
+message ApplyConfiguration {
+ // expression will be evaluated by CEL to create an apply configuration.
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ //   spec: Object.spec{
+ //     serviceAccountName: "example"
+ //   }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ //   request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
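+ //
+ // As a further illustration (a sketch; 'default-sa' is a hypothetical
+ // value, not from this API's documentation), an expression may compute
+ // the applied value conditionally from the request:
+ //
+ // Object{
+ //   spec: Object.spec{
+ //     serviceAccountName: has(object.spec.serviceAccountName) ?
+ //       object.spec.serviceAccountName : "default-sa"
+ //   }
+ // }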
+ optional string expression = 1;
+}
+
// AuditAnnotation describes how to produce an audit annotation for an API request.
message AuditAnnotation {
// key specifies the audit annotation key. The audit annotation keys of
@@ -79,6 +124,75 @@ message ExpressionWarning {
optional string warning = 3;
}
+// JSONPatch defines a JSON Patch.
+message JSONPatch {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ //   JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ //   JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ //   JSONPatch{
+ //     op: "add",
+ //     path: "/spec/selector",
+ //     value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ //   }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+ //
+ // [
+ //   JSONPatch{
+ //     op: "add",
+ //     path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+ //     value: "test"
+ //   },
+ // ]
+ //
+ // CEL expressions have access to the types needed to create JSON patches and objects:
+ //
+ // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+ //   See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+ //   integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
+ //   [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+ //   function may be used to escape path keys containing '/' and '~'.
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ //   request resource.
+ //
+ // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+ // as well as:
+ //
+ // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ optional string expression = 1;
+}
+
// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
message MatchCondition {
// Name is an identifier for this match condition, used for strategic merging of MatchConditions,
@@ -203,6 +317,173 @@ message MatchResources {
optional string matchPolicy = 7;
}
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into the admission chain.
+message MutatingAdmissionPolicy {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ optional MutatingAdmissionPolicySpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+message MutatingAdmissionPolicyBinding {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ optional MutatingAdmissionPolicyBindingSpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of MutatingAdmissionPolicyBinding.
+ repeated MutatingAdmissionPolicyBinding items = 2;
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingSpec {
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ optional string policyName = 1;
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ optional ParamRef paramRef = 2;
+
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // +optional
+ optional MatchResources matchResources = 3;
+}
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+message MutatingAdmissionPolicyList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of MutatingAdmissionPolicy.
+ repeated MutatingAdmissionPolicy items = 2;
+}
+
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
+message MutatingAdmissionPolicySpec {
+ // paramKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+ // +optional
+ optional ParamKind paramKind = 1;
+
+ // matchConstraints specifies what resources this policy is designed to validate.
+ // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
+ // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // Required.
+ optional MatchResources matchConstraints = 2;
+
+ // variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except matchConditions because matchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, variables must be sorted by the order of first appearance and acyclic.
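+ //
+ // For example (an illustrative sketch; the variable name is hypothetical),
+ // a variable named 'isTestNamespace' with the expression
+ // "object.metadata.namespace.startsWith('test-')" can be referenced from a
+ // later variable or a mutation expression as 'variables.isTestNamespace'.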
+ // +listType=atomic + // +optional + repeated Variable variables = 3; + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + repeated Mutation mutations = 4; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 5; + + // matchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + optional string reinvocationPolicy = 7; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -401,6 +682,26 @@ message MutatingWebhookConfigurationList { repeated MutatingWebhookConfiguration items = 2; } +// Mutation specifies the CEL expression which is used to apply the Mutation. +message Mutation { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + optional string patchType = 2; + + // applyConfiguration defines the desired configuration values of an object. 
+ // The configuration is applied to the admission object using
+ // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+ // A CEL expression is used to create apply configuration.
+ optional ApplyConfiguration applyConfiguration = 3;
+
+ // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+ // A CEL expression is used to create the JSON patch.
+ optional JSONPatch jsonPatch = 4;
+}
+
// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
// +structType=atomic
message NamedRuleWithOperations {
diff --git a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
index 363233a2f9a..be64c4a5fab 100644
--- a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
+++ b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
@@ -54,6 +54,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ValidatingAdmissionPolicyList{},
&ValidatingAdmissionPolicyBinding{},
&ValidatingAdmissionPolicyBindingList{},
+ &MutatingAdmissionPolicy{},
+ &MutatingAdmissionPolicyList{},
+ &MutatingAdmissionPolicyBinding{},
+ &MutatingAdmissionPolicyBindingList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
diff --git a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
index 0f590312392..cffdda82c9a 100644
--- a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
+++ b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
@@ -1073,7 +1073,7 @@ type MutatingWebhook struct {
}
// ReinvocationPolicyType specifies what type of policy the admission hook uses.
-type ReinvocationPolicyType string
+type ReinvocationPolicyType = v1.ReinvocationPolicyType
const (
// NeverReinvocationPolicy indicates that the webhook must not be called more than once in a
@@ -1197,3 +1197,332 @@ type MatchCondition struct {
// Required.
Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"`
}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into the admission chain.
+type MutatingAdmissionPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+type MutatingAdmissionPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of MutatingAdmissionPolicy.
+ Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy. +type MutatingAdmissionPolicySpec struct { + // paramKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // matchConstraints specifies what resources this policy is designed to validate. + // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except matchConditions because matchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, variables must be sorted by the order of first appearance and acyclic. + // +listType=atomic + // +optional + Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"` + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchConditions is a list of conditions that must be met for a request to be validated. 
+ // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// Mutation specifies the CEL expression which is used to apply the Mutation. +type Mutation struct { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"` + + // applyConfiguration defines the desired configuration values of an object. + // The configuration is applied to the admission object using + // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). + // A CEL expression is used to create apply configuration. + ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"` + + // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. + // A CEL expression is used to create the JSON patch. + JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"` +} + +// PatchType specifies the type of patch operation for a mutation. +// +enum +type PatchType string + +const ( + // ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object. + PatchTypeApplyConfiguration PatchType = "ApplyConfiguration" + // JSONPatch indicates that the object is mutated through JSON Patch. + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// ApplyConfiguration defines the desired configuration values of an object. +type ApplyConfiguration struct { + // expression will be evaluated by CEL to create an apply configuration. 
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ //   spec: Object.spec{
+ //     serviceAccountName: "example"
+ //   }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ //   request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
+}
+
+// JSONPatch defines a JSON Patch.
+type JSONPatch struct {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ //   JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ //   JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ //   JSONPatch{
+ //     op: "add",
+ //     path: "/spec/selector",
+ //     value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ //   }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), + // value: "test" + // }, + // ] + // + // CEL expressions have access to the types needed to create JSON patches and objects: + // + // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. + // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, + // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a + // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL + // function may be used to escape path keys containing '/' and '~'. + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) + // as well as: + // + // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). + // + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.34 + +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators +// configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget). 
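
The Mutation, ApplyConfiguration and JSONPatch types above are plain Go structs whose Expression fields carry CEL source as opaque strings; the CEL is only parsed and type-checked server-side. A minimal sketch of populating both mutation shapes, assuming the vendored package imports as k8s.io/api/admissionregistration/v1beta1 and reusing the CEL snippets quoted in the doc comments:

    package main

    import (
    	"fmt"

    	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    )

    func main() {
    	// An ApplyConfiguration mutation: the CEL expression evaluates to an
    	// apply configuration that sets spec.serviceAccountName.
    	applyMut := v1beta1.Mutation{
    		PatchType: v1beta1.PatchTypeApplyConfiguration,
    		ApplyConfiguration: &v1beta1.ApplyConfiguration{
    			Expression: `Object{spec: Object.spec{serviceAccountName: "example"}}`,
    		},
    	}

    	// A JSONPatch mutation: the CEL expression evaluates to an array of
    	// JSONPatch operations, here the test/replace pair from the comment above.
    	patchMut := v1beta1.Mutation{
    		PatchType: v1beta1.PatchTypeJSONPatch,
    		JSONPatch: &v1beta1.JSONPatch{
    			Expression: `[
    				JSONPatch{op: "test", path: "/spec/example", value: "Red"},
    				JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
    			]`,
    		},
    	}

    	fmt.Println(applyMut.PatchType, patchMut.PatchType)
    }

Nothing in this sketch validates the embedded expressions; a malformed expression only surfaces when the apiserver compiles the policy.
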
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+type MutatingAdmissionPolicyBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of MutatingAdmissionPolicyBinding.
+ Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingSpec struct {
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
+
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
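
A hedged sketch of how the two objects wire together through spec.policyName, using only types defined in this file; the object names ("set-env-label", "set-env-label-binding") are invented for illustration, and matchConstraints, which a real policy requires, is left unset because the sketch never submits the objects to an apiserver:

    package main

    import (
    	"fmt"

    	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	fail := v1beta1.Fail // the documented default failurePolicy

    	policy := v1beta1.MutatingAdmissionPolicy{
    		ObjectMeta: metav1.ObjectMeta{Name: "set-env-label"},
    		Spec: v1beta1.MutatingAdmissionPolicySpec{
    			// MatchConstraints is required on a real policy; elided here.
    			Mutations: []v1beta1.Mutation{{
    				PatchType: v1beta1.PatchTypeJSONPatch,
    				JSONPatch: &v1beta1.JSONPatch{
    					Expression: `[JSONPatch{op: "add", path: "/metadata/labels/" +
    						jsonpatch.escapeKey("example.com/environment"), value: "test"}]`,
    				},
    			}},
    			FailurePolicy:      &fail,
    			ReinvocationPolicy: "Never", // one of the two documented values
    		},
    	}

    	binding := v1beta1.MutatingAdmissionPolicyBinding{
    		ObjectMeta: metav1.ObjectMeta{Name: "set-env-label-binding"},
    		Spec: v1beta1.MutatingAdmissionPolicyBindingSpec{
    			PolicyName: policy.Name,
    			// ParamRef and MatchResources stay nil: no params are used, and
    			// matching is then constrained only by the policy's matchConstraints.
    		},
    	}

    	fmt.Printf("binding %q -> policy %q\n", binding.Name, binding.Spec.PolicyName)
    }

Since this binding sets no paramRef, the policy is evaluated once per request (N is 1 in the terms of the doc comment above).
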
+ // +optional
+ MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
+}
diff --git a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
index cc1509b539a..1a97c94729f 100644
--- a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
+++ b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
@@ -27,6 +27,15 @@ package v1beta1
 // Those methods can be generated by using hack/update-codegen.sh
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ApplyConfiguration = map[string]string{
+ "": "ApplyConfiguration defines the desired configuration values of an object.",
+ "expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t  spec: Object.spec{\n\t    serviceAccountName: \"example\"\n\t  }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n  For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n  See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n  request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (ApplyConfiguration) SwaggerDoc() map[string]string {
+ return map_ApplyConfiguration
+}
+
 var map_AuditAnnotation = map[string]string{
 "": "AuditAnnotation describes how to produce an audit annotation for an API request.",
 "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
@@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string {
 return map_ExpressionWarning
 }
 
+var map_JSONPatch = map[string]string{
+ "": "JSONPatch defines a JSON Patch.",
+ "expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t  [\n\t    JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t    JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t  ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t  [\n\t    JSONPatch{\n\t      op: \"add\",\n\t      path: \"/spec/selector\",\n\t      value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t    }\n\t  ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t  [\n\t    JSONPatch{\n\t      op: \"add\",\n\t      path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t      value: \"test\"\n\t    },\n\t  ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n  See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n  integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n  [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n  function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n  For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n  See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n  request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (JSONPatch) SwaggerDoc() map[string]string {
+ return map_JSONPatch
+}
+
 var map_MatchCondition = map[string]string{
 "": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.",
 "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.",
@@ -70,6 +88,72 @@ func (MatchResources) SwaggerDoc() map[string]string {
 return map_MatchResources
 }
 
+var map_MutatingAdmissionPolicy = map[string]string{
+ "": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicy
+}
+
+var map_MutatingAdmissionPolicyBinding = map[string]string{
+ "": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBinding
+}
+
+var map_MutatingAdmissionPolicyBindingList = map[string]string{
+ "": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of MutatingAdmissionPolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBindingList
+}
+
+var map_MutatingAdmissionPolicyBindingSpec = map[string]string{
+ "": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.",
+ "policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored. Required.",
+ "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
+ "matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constrain matching when unset. Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.",
+}
+
+func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBindingSpec
+}
+
+var map_MutatingAdmissionPolicyList = map[string]string{
+ "": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of MutatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyList
+}
+
+var map_MutatingAdmissionPolicySpec = map[string]string{
+ "": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.",
+ "paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.",
+ "matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.", + "variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.", + "mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. 
Required.", +} + +func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicySpec +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -110,6 +194,17 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_MutatingWebhookConfigurationList } +var map_Mutation = map[string]string{ + "": "Mutation specifies the CEL expression which is used to apply the Mutation.", + "patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.", + "applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.", + "jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.", +} + +func (Mutation) SwaggerDoc() map[string]string { + return map_Mutation +} + var map_NamedRuleWithOperations = map[string]string{ "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index 4c10b1d1135..3749a3d1413 100644 --- a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration. +func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration { + if in == nil { + return nil + } + out := new(ApplyConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { *out = *in @@ -59,6 +75,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONPatch) DeepCopyInto(out *JSONPatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch. +func (in *JSONPatch) DeepCopy() *JSONPatch { + if in == nil { + return nil + } + out := new(JSONPatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { *out = *in @@ -120,6 +152,200 @@ func (in *MatchResources) DeepCopy() *MatchResources { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy. +func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding. +func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList. +func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec. +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList. +func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + if in.Mutations != nil { + in, out := &in.Mutations, &out.Mutations + *out = make([]Mutation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec. +func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -168,7 +394,7 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { } if in.ReinvocationPolicy != nil { in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy - *out = new(ReinvocationPolicyType) + *out = new(admissionregistrationv1.ReinvocationPolicyType) **out = **in } if in.MatchConditions != nil { @@ -255,6 +481,32 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mutation) DeepCopyInto(out *Mutation) { + *out = *in + if in.ApplyConfiguration != nil { + in, out := &in.ApplyConfiguration, &out.ApplyConfiguration + *out = new(ApplyConfiguration) + **out = **in + } + if in.JSONPatch != nil { + in, out := &in.JSONPatch, &out.JSONPatch + *out = new(JSONPatch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation. +func (in *Mutation) DeepCopy() *Mutation { + if in == nil { + return nil + } + out := new(Mutation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { *out = *in diff --git a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go index c1be5122a87..4fc0596b345 100644 --- a/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/e2e/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,78 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
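
The generated lifecycle helpers above encode the 1.34 introduced / 1.37 deprecated / 1.40 removed schedule as comparable (major, minor) pairs. A small sketch, under the assumption that callers only need the functions defined here, of gating use of the type on a target release:

    package main

    import (
    	"fmt"

    	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
    )

    // beforeRelease reports whether release a.b sorts strictly before x.y.
    func beforeRelease(a, b, x, y int) bool {
    	return a < x || (a == x && b < y)
    }

    func main() {
    	var p v1beta1.MutatingAdmissionPolicy
    	introMaj, introMin := p.APILifecycleIntroduced() // 1, 34
    	remMaj, remMin := p.APILifecycleRemoved()        // 1, 40

    	// Served from the introduction release up to, but not including, removal.
    	target := [2]int{1, 38}
    	served := !beforeRelease(target[0], target[1], introMaj, introMin) &&
    		beforeRelease(target[0], target[1], remMaj, remMin)
    	fmt.Printf("v1beta1 MutatingAdmissionPolicy served at %d.%d: %v\n",
    		target[0], target[1], served)
    }
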
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { diff --git a/e2e/vendor/k8s.io/api/apps/v1/generated.proto b/e2e/vendor/k8s.io/api/apps/v1/generated.proto index 38c8997e994..5885a62225f 100644 --- a/e2e/vendor/k8s.io/api/apps/v1/generated.proto +++ b/e2e/vendor/k8s.io/api/apps/v1/generated.proto @@ -530,7 +530,7 @@ message RollingUpdateDaemonSet { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/e2e/vendor/k8s.io/api/apps/v1/types.go b/e2e/vendor/k8s.io/api/apps/v1/types.go index 1362d875d88..4cf54cc99b6 100644 --- a/e2e/vendor/k8s.io/api/apps/v1/types.go +++ b/e2e/vendor/k8s.io/api/apps/v1/types.go @@ -635,7 +635,7 @@ type RollingUpdateDaemonSet struct { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/e2e/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index f44ba7bc332..ac54033fd6d 100644 --- a/e2e/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { diff --git a/e2e/vendor/k8s.io/api/apps/v1beta1/generated.proto b/e2e/vendor/k8s.io/api/apps/v1beta1/generated.proto index 0601efc3c47..b61dc490dbd 100644 --- a/e2e/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/e2e/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -316,6 +316,9 @@ message Scale { message ScaleSpec { // replicas is the number of observed instances of the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/e2e/vendor/k8s.io/api/apps/v1beta1/types.go b/e2e/vendor/k8s.io/api/apps/v1beta1/types.go index 5530c990daa..cd140be12fa 100644 --- a/e2e/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/e2e/vendor/k8s.io/api/apps/v1beta1/types.go @@ -33,6 +33,9 @@ const ( type ScaleSpec struct { // replicas is the number of observed instances of the scaled object. 
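
The +k8s:optional, +default=0 and +k8s:minimum=0 markers being added to ScaleSpec.replicas are declarative-validation tags consumed by code generators and the apiserver; client-side the field stays a plain int32 whose Go zero value happens to coincide with the declared default. A sketch under that assumption (the minimum is enforced server-side when the /scale subresource is validated, not by these structs):

    package main

    import (
    	"fmt"

    	appsv1beta1 "k8s.io/api/apps/v1beta1"
    )

    func main() {
    	// Omitting replicas serializes to nothing ("omitempty"); the declared
    	// +default=0 then yields the same value as Go's zero value here.
    	var s appsv1beta1.Scale
    	fmt.Println(s.Spec.Replicas) // 0

    	// A negative value compiles fine client-side; the +k8s:minimum=0 marker
    	// only takes effect when the apiserver validates the request.
    	s.Spec = appsv1beta1.ScaleSpec{Replicas: -1}
    	fmt.Println(s.Spec.Replicas) // -1
    }
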
// +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } @@ -60,6 +63,7 @@ type ScaleStatus struct { // +k8s:prerelease-lifecycle-gen:deprecated=1.8 // +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale +// +k8s:isSubresource=/scale // Scale represents a scaling request for a resource. type Scale struct { diff --git a/e2e/vendor/k8s.io/api/apps/v1beta2/generated.proto b/e2e/vendor/k8s.io/api/apps/v1beta2/generated.proto index 68c463e2570..37c6d5ae1bf 100644 --- a/e2e/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/e2e/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -536,7 +536,7 @@ message RollingUpdateDaemonSet { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may @@ -614,6 +614,9 @@ message Scale { message ScaleSpec { // desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/e2e/vendor/k8s.io/api/apps/v1beta2/types.go b/e2e/vendor/k8s.io/api/apps/v1beta2/types.go index 491afc59f5b..e9dc85df055 100644 --- a/e2e/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/e2e/vendor/k8s.io/api/apps/v1beta2/types.go @@ -35,6 +35,9 @@ const ( type ScaleSpec struct { // desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } @@ -63,6 +66,7 @@ type ScaleStatus struct { // +k8s:prerelease-lifecycle-gen:deprecated=1.9 // +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale +// +k8s:isSubresource=/scale // Scale represents a scaling request for a resource. type Scale struct { @@ -681,7 +685,7 @@ type RollingUpdateDaemonSet struct { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. 
// Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/e2e/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 40894341510..34d80af58d7 100644 --- a/e2e/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. 
Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { diff --git a/e2e/vendor/k8s.io/api/authorization/v1/generated.proto b/e2e/vendor/k8s.io/api/authorization/v1/generated.proto index 37b05b8552f..ff529c969e4 100644 --- a/e2e/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/e2e/vendor/k8s.io/api/authorization/v1/generated.proto @@ -167,16 +167,10 @@ message ResourceAttributes { optional string name = 7; // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). // +optional optional FieldSelectorAttributes fieldSelector = 8; // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). // +optional optional LabelSelectorAttributes labelSelector = 9; } diff --git a/e2e/vendor/k8s.io/api/authorization/v1/types.go b/e2e/vendor/k8s.io/api/authorization/v1/types.go index 36f5fa41078..251e776b024 100644 --- a/e2e/vendor/k8s.io/api/authorization/v1/types.go +++ b/e2e/vendor/k8s.io/api/authorization/v1/types.go @@ -119,15 +119,9 @@ type ResourceAttributes struct { // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). // +optional FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - // - // This field is alpha-level. To use this field, you must enable the - // `AuthorizeWithSelectors` feature gate (disabled by default). // +optional LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` } diff --git a/e2e/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index dc6b8a89ecd..29d0aa8463d 100644 --- a/e2e/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -87,8 +87,8 @@ var map_ResourceAttributes = map[string]string{ "resource": "Resource is one of the existing resource types. \"*\" means all.", "subresource": "Subresource is one of the existing resource types. \"\" means none.", "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". 
\"\" (empty) means all.", - "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", - "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/e2e/vendor/k8s.io/api/autoscaling/v1/generated.proto b/e2e/vendor/k8s.io/api/autoscaling/v1/generated.proto index 68c35b6b22b..a17d7989db5 100644 --- a/e2e/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/e2e/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -472,6 +472,9 @@ message Scale { message ScaleSpec { // replicas is the desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/e2e/vendor/k8s.io/api/autoscaling/v1/types.go b/e2e/vendor/k8s.io/api/autoscaling/v1/types.go index 85c609e5c77..e1e8809fe9f 100644 --- a/e2e/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/e2e/vendor/k8s.io/api/autoscaling/v1/types.go @@ -117,6 +117,7 @@ type HorizontalPodAutoscalerList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 +// +k8s:isSubresource=/scale // Scale represents a scaling request for a resource. type Scale struct { @@ -138,6 +139,9 @@ type Scale struct { type ScaleSpec struct { // replicas is the desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } diff --git a/e2e/vendor/k8s.io/api/batch/v1/generated.proto b/e2e/vendor/k8s.io/api/batch/v1/generated.proto index d3aeae0adb4..c0ce8cef26c 100644 --- a/e2e/vendor/k8s.io/api/batch/v1/generated.proto +++ b/e2e/vendor/k8s.io/api/batch/v1/generated.proto @@ -226,7 +226,8 @@ message JobSpec { optional SuccessPolicy successPolicy = 16; // Specifies the number of retries before marking this job failed. - // Defaults to 6 + // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. + // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647. // +optional optional int32 backoffLimit = 7; @@ -329,8 +330,6 @@ message JobSpec { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. - // This is on by default. // +optional optional string podReplacementPolicy = 14; @@ -570,7 +569,7 @@ message PodFailurePolicyRule { message SuccessPolicy { // rules represents the list of alternative rules for the declaring the Jobs // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, - // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. 
+ // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed. // The terminal state for such a Job has the "Complete" condition. // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, // other rules are ignored. At most 20 elements are allowed. diff --git a/e2e/vendor/k8s.io/api/batch/v1/types.go b/e2e/vendor/k8s.io/api/batch/v1/types.go index 6c0007c21e4..9183c073d2a 100644 --- a/e2e/vendor/k8s.io/api/batch/v1/types.go +++ b/e2e/vendor/k8s.io/api/batch/v1/types.go @@ -257,7 +257,7 @@ type PodFailurePolicy struct { type SuccessPolicy struct { // rules represents the list of alternative rules for the declaring the Jobs // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, - // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. + // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed. // The terminal state for such a Job has the "Complete" condition. // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, // other rules are ignored. At most 20 elements are allowed. @@ -347,7 +347,8 @@ type JobSpec struct { SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` // Specifies the number of retries before marking this job failed. - // Defaults to 6 + // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. + // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647. // +optional BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` @@ -455,8 +456,6 @@ type JobSpec struct { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. - // This is on by default. // +optional PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` diff --git a/e2e/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index ffd4e4f5fea..451f4609f2d 100644 --- a/e2e/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -117,7 +117,7 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. 
If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.", - "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", + "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.", "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.", "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", @@ -126,7 +126,7 @@ var map_JobSpec = map[string]string{ "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. 
If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", - "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.", "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).", } @@ -206,7 +206,7 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string { var map_SuccessPolicy = map[string]string{ "": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.", - "rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.", + "rules": "rules represents the list of alternative rules for declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SuccessCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. 
At most 20 elements are allowed.", } func (SuccessPolicy) SwaggerDoc() map[string]string { diff --git a/e2e/vendor/k8s.io/api/certificates/v1/generated.proto b/e2e/vendor/k8s.io/api/certificates/v1/generated.proto index dac7c7f5f22..24528fc8bc2 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/e2e/vendor/k8s.io/api/certificates/v1/generated.proto @@ -39,6 +39,8 @@ option go_package = "k8s.io/api/certificates/v1"; // This API can be used to request client certificates to authenticate to kube-apiserver // (with the "kubernetes.io/kube-apiserver-client" signerName), // or to obtain certificates from custom non-Kubernetes signers. +// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval message CertificateSigningRequest { // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -203,6 +205,11 @@ message CertificateSigningRequestStatus { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember repeated CertificateSigningRequestCondition conditions = 1; // certificate is populated with an issued certificate by the signer after an Approved condition is present. diff --git a/e2e/vendor/k8s.io/api/certificates/v1/types.go b/e2e/vendor/k8s.io/api/certificates/v1/types.go index ba8009840d8..71203e80d55 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1/types.go +++ b/e2e/vendor/k8s.io/api/certificates/v1/types.go @@ -39,6 +39,8 @@ import ( // This API can be used to request client certificates to authenticate to kube-apiserver // (with the "kubernetes.io/kube-apiserver-client" signerName), // or to obtain certificates from custom non-Kubernetes signers. +// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval type CertificateSigningRequest struct { metav1.TypeMeta `json:",inline"` // +optional @@ -178,6 +180,11 @@ type CertificateSigningRequestStatus struct { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` // certificate is populated with an issued certificate by the signer after an Approved condition is present. diff --git a/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go index a62a4005963..c260f0436da 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go +++ b/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -25,11 +25,14 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. 
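The `zeroOrOneOfMember` annotations added above encode the invariant that a CertificateSigningRequest carries at most one "Approved" and at most one "Denied" condition in status.conditions. A minimal sketch of how a client could read that decision, assuming the standard k8s.io/api/certificates/v1 types (the helper name is illustrative, not part of the API):

package example

import (
	certificatesv1 "k8s.io/api/certificates/v1"
	corev1 "k8s.io/api/core/v1"
)

// csrDecision reports whether a CertificateSigningRequest has been approved
// or denied. Because the Approved and Denied conditions are declared
// zero-or-one-of, collecting the true condition of each type is sufficient.
func csrDecision(csr *certificatesv1.CertificateSigningRequest) (approved, denied bool) {
	for _, c := range csr.Status.Conditions {
		if c.Status != corev1.ConditionTrue {
			continue
		}
		switch c.Type {
		case certificatesv1.CertificateApproved:
			approved = true
		case certificatesv1.CertificateDenied:
			denied = true
		}
	}
	return approved, denied
}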
@@ -127,10 +130,126 @@ func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo +func (m *PodCertificateRequest) Reset() { *m = PodCertificateRequest{} } +func (*PodCertificateRequest) ProtoMessage() {} +func (*PodCertificateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{3} +} +func (m *PodCertificateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequest.Merge(m, src) +} +func (m *PodCertificateRequest) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequest proto.InternalMessageInfo + +func (m *PodCertificateRequestList) Reset() { *m = PodCertificateRequestList{} } +func (*PodCertificateRequestList) ProtoMessage() {} +func (*PodCertificateRequestList) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{4} +} +func (m *PodCertificateRequestList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequestList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequestList.Merge(m, src) +} +func (m *PodCertificateRequestList) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequestList) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequestList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequestList proto.InternalMessageInfo + +func (m *PodCertificateRequestSpec) Reset() { *m = PodCertificateRequestSpec{} } +func (*PodCertificateRequestSpec) ProtoMessage() {} +func (*PodCertificateRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{5} +} +func (m *PodCertificateRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequestSpec.Merge(m, src) +} +func (m *PodCertificateRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequestSpec proto.InternalMessageInfo + +func (m *PodCertificateRequestStatus) Reset() { *m = PodCertificateRequestStatus{} } +func (*PodCertificateRequestStatus) ProtoMessage() {} +func (*PodCertificateRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{6} +} +func (m *PodCertificateRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCertificateRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *PodCertificateRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCertificateRequestStatus.Merge(m, src) +} +func (m *PodCertificateRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *PodCertificateRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodCertificateRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCertificateRequestStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle") proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList") proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec") + proto.RegisterType((*PodCertificateRequest)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequest") + proto.RegisterType((*PodCertificateRequestList)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestList") + proto.RegisterType((*PodCertificateRequestSpec)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestSpec") + proto.RegisterType((*PodCertificateRequestStatus)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestStatus") } func init() { @@ -138,35 +257,65 @@ func init() { } var fileDescriptor_f73d5fe56c015bb8 = []byte{ - // 437 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6a, 0xdb, 0x40, - 0x10, 0xc6, 0xb5, 0x69, 0x02, 0xc9, 0xba, 0x85, 0xa2, 0x42, 0x31, 0x3e, 0x6c, 0x8c, 0x4f, 0xb9, - 0x74, 0x37, 0x36, 0x69, 0xc9, 0x59, 0x85, 0x42, 0xa1, 0x7f, 0x40, 0xe9, 0xa5, 0xa1, 0x87, 0xae, - 0xd7, 0x13, 0x79, 0x6b, 0x4b, 0x5a, 0x76, 0x57, 0x86, 0xde, 0x0a, 0x7d, 0x81, 0x3e, 0x96, 0x8f, - 0x69, 0x4f, 0x39, 0x85, 0x5a, 0x7d, 0x91, 0xb2, 0x6b, 0xd9, 0x12, 0x55, 0x8b, 0x4b, 0x6e, 0x9a, - 0xd1, 0xfc, 0xbe, 0x6f, 0xbe, 0x11, 0xc2, 0xa7, 0xb3, 0x73, 0x43, 0x65, 0xce, 0xb8, 0x92, 0x4c, - 0x80, 0xb6, 0xf2, 0x4a, 0x0a, 0x6e, 0xc1, 0xb0, 0xc5, 0x90, 0xcf, 0xd5, 0x94, 0x0f, 0x59, 0x02, - 0x19, 0x68, 0x6e, 0x61, 0x42, 0x95, 0xce, 0x6d, 0x1e, 0xf6, 0xd7, 0x04, 0xe5, 0x4a, 0xd2, 0x26, - 0x41, 0x37, 0x44, 0xef, 0x49, 0x22, 0xed, 0xb4, 0x18, 0x53, 0x91, 0xa7, 0x2c, 0xc9, 0x93, 0x9c, - 0x79, 0x70, 0x5c, 0x5c, 0xf9, 0xca, 0x17, 0xfe, 0x69, 0x2d, 0xd8, 0x3b, 0xab, 0x57, 0x48, 0xb9, - 0x98, 0xca, 0x0c, 0xf4, 0x67, 0xa6, 0x66, 0x89, 0x6b, 0x18, 0x96, 0x82, 0xe5, 0x6c, 0xd1, 0x5a, - 0xa3, 0xc7, 0xfe, 0x45, 0xe9, 0x22, 0xb3, 0x32, 0x85, 0x16, 0xf0, 0x6c, 0x17, 0x60, 0xc4, 0x14, - 0x52, 0xfe, 0x27, 0x37, 0xf8, 0x81, 0x70, 0xf8, 0x7c, 0x5e, 0x18, 0x0b, 0xfa, 0x9d, 0x2e, 0x8c, - 0x8d, 0x8a, 0x6c, 0x32, 0x87, 0xf0, 0x23, 0x3e, 0x74, 0xab, 0x4d, 0xb8, 0xe5, 0x5d, 0xd4, 0x47, - 0x27, 0x9d, 0xd1, 0x29, 0xad, 0x2f, 0xb3, 0x75, 0xa0, 0x6a, 0x96, 0xb8, 0x86, 0xa1, 0x6e, 0x9a, - 0x2e, 0x86, 0xf4, 0xed, 0xf8, 0x13, 0x08, 0xfb, 0x1a, 0x2c, 0x8f, 0xc2, 0xe5, 0xed, 0x71, 0x50, - 0xde, 0x1e, 0xe3, 0xba, 0x17, 0x6f, 0x55, 0xc3, 0x4b, 0xbc, 0x6f, 0x14, 0x88, 0xee, 0x9e, 0x57, - 0x3f, 0xa7, 0xbb, 0xee, 0x4e, 0xdb, 0x5b, 0x5e, 0x28, 0x10, 0xd1, 0xfd, 0xca, 0x65, 0xdf, 0x55, - 0xb1, 0xd7, 0x1c, 0x7c, 0x47, 0xf8, 0x71, 0x7b, 0xfc, 0x95, 0x34, 0x36, 0xfc, 0xd0, 0x0a, 0x46, - 0xff, 0x2f, 0x98, 0xa3, 0x7d, 0xac, 0x87, 0x95, 0xe1, 0xe1, 0xa6, 0xd3, 0x08, 0xf5, 0x1e, 0x1f, - 0x48, 0x0b, 0xa9, 0xe9, 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0x67, 0x77, 0x49, 0x15, 0x3d, 0xa8, - 0x0c, 0x0e, 0x5e, 0x3a, 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0xd7, 0x4c, 0x2e, 
0x74, 0x38, 0xc2, - 0xd8, 0xc8, 0x24, 0x03, 0xfd, 0x86, 0xa7, 0xe0, 0x53, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0xbe, 0x89, - 0x1b, 0x53, 0xe1, 0x53, 0xdc, 0xb1, 0xb5, 0x8c, 0xff, 0x0a, 0x47, 0xd1, 0xa3, 0x0a, 0xea, 0x34, - 0x1c, 0xe2, 0xe6, 0x5c, 0xf4, 0x62, 0xb9, 0x22, 0xc1, 0xf5, 0x8a, 0x04, 0x37, 0x2b, 0x12, 0x7c, - 0x29, 0x09, 0x5a, 0x96, 0x04, 0x5d, 0x97, 0x04, 0xdd, 0x94, 0x04, 0xfd, 0x2c, 0x09, 0xfa, 0xf6, - 0x8b, 0x04, 0x97, 0xfd, 0x5d, 0xbf, 0xdd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x1c, 0xcb, - 0xdd, 0x99, 0x03, 0x00, 0x00, + // 918 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0xb6, 0x69, 0x9b, 0x49, 0x5b, 0xda, 0x61, 0x17, 0x99, 0x22, 0x39, 0x21, 0x07, + 0x54, 0x90, 0xb0, 0xb7, 0xa5, 0xb0, 0x2b, 0x10, 0x48, 0x75, 0x0a, 0x52, 0xe9, 0x6e, 0x36, 0x9a, + 0x74, 0xf9, 0xb1, 0x5a, 0x24, 0x1c, 0xe7, 0x25, 0x19, 0x1a, 0x7b, 0x8c, 0x67, 0x5c, 0xb5, 0x37, + 0x24, 0xfe, 0x01, 0xfe, 0x23, 0xae, 0x3d, 0x2e, 0x5c, 0xd8, 0x53, 0xa0, 0xe6, 0x6f, 0xe0, 0xb2, + 0x27, 0xe4, 0xb1, 0x9d, 0x5f, 0x4e, 0xb6, 0xd9, 0x1e, 0x7a, 0xcb, 0xbc, 0x79, 0xdf, 0xcf, 0xfb, + 0xbe, 0x99, 0x37, 0x56, 0xd0, 0xbd, 0xd3, 0x07, 0x5c, 0xa7, 0xcc, 0xb0, 0x3c, 0x6a, 0xd8, 0xe0, + 0x0b, 0xda, 0xa6, 0xb6, 0x25, 0x80, 0x1b, 0x67, 0xbb, 0x56, 0xcf, 0xeb, 0x5a, 0xbb, 0x46, 0x07, + 0x5c, 0xf0, 0x2d, 0x01, 0x2d, 0xdd, 0xf3, 0x99, 0x60, 0xb8, 0x1c, 0x2b, 0x74, 0xcb, 0xa3, 0xfa, + 0xa8, 0x42, 0x4f, 0x15, 0xdb, 0x1f, 0x76, 0xa8, 0xe8, 0x06, 0x4d, 0xdd, 0x66, 0x8e, 0xd1, 0x61, + 0x1d, 0x66, 0x48, 0x61, 0x33, 0x68, 0xcb, 0x95, 0x5c, 0xc8, 0x5f, 0x31, 0x70, 0x7b, 0x7f, 0x68, + 0xc1, 0xb1, 0xec, 0x2e, 0x75, 0xc1, 0xbf, 0x30, 0xbc, 0xd3, 0x4e, 0x14, 0xe0, 0x86, 0x03, 0xc2, + 0x32, 0xce, 0x32, 0x36, 0xb6, 0x8d, 0x59, 0x2a, 0x3f, 0x70, 0x05, 0x75, 0x20, 0x23, 0xf8, 0xe4, + 0x3a, 0x01, 0xb7, 0xbb, 0xe0, 0x58, 0x93, 0xba, 0xca, 0x9f, 0x0a, 0xc2, 0xd5, 0x5e, 0xc0, 0x05, + 0xf8, 0x27, 0x7e, 0xc0, 0x85, 0x19, 0xb8, 0xad, 0x1e, 0xe0, 0x1f, 0xd1, 0x6a, 0x64, 0xad, 0x65, + 0x09, 0x4b, 0x55, 0xca, 0xca, 0x4e, 0x71, 0xef, 0x9e, 0x3e, 0x3c, 0x99, 0x41, 0x05, 0xdd, 0x3b, + 0xed, 0x44, 0x01, 0xae, 0x47, 0xd9, 0xfa, 0xd9, 0xae, 0xfe, 0xb8, 0xf9, 0x13, 0xd8, 0xe2, 0x11, + 0x08, 0xcb, 0xc4, 0x97, 0xfd, 0x52, 0x2e, 0xec, 0x97, 0xd0, 0x30, 0x46, 0x06, 0x54, 0xfc, 0x14, + 0x2d, 0x71, 0x0f, 0x6c, 0x75, 0x41, 0xd2, 0x1f, 0xe8, 0xd7, 0x9d, 0xbb, 0x9e, 0x75, 0xd9, 0xf0, + 0xc0, 0x36, 0xd7, 0x92, 0x2a, 0x4b, 0xd1, 0x8a, 0x48, 0x66, 0xe5, 0x0f, 0x05, 0xbd, 0x95, 0x4d, + 0x7f, 0x48, 0xb9, 0xc0, 0xcf, 0x32, 0x8d, 0xe9, 0xf3, 0x35, 0x16, 0xa9, 0x65, 0x5b, 0x9b, 0x49, + 0xc1, 0xd5, 0x34, 0x32, 0xd2, 0xd4, 0xf7, 0x28, 0x4f, 0x05, 0x38, 0x5c, 0x5d, 0x28, 0x2f, 0xee, + 0x14, 0xf7, 0xf6, 0x6f, 0xd2, 0x95, 0xb9, 0x9e, 0x14, 0xc8, 0x1f, 0x45, 0x28, 0x12, 0x13, 0x2b, + 0xbf, 0x4e, 0xed, 0x29, 0x6a, 0x1a, 0xef, 0x21, 0xc4, 0x69, 0xc7, 0x05, 0xbf, 0x66, 0x39, 0x20, + 0xbb, 0x2a, 0x0c, 0x0f, 0xbf, 0x31, 0xd8, 0x21, 0x23, 0x59, 0xf8, 0x63, 0x54, 0x14, 0x43, 0x8c, + 0xbc, 0x85, 0x82, 0xf9, 0x66, 0x22, 0x2a, 0x8e, 0x54, 0x20, 0xa3, 0x79, 0x95, 0xdf, 0x17, 0xd0, + 0xdd, 0x3a, 0x6b, 0x55, 0x87, 0xbd, 0x10, 0xf8, 0x39, 0x00, 0x2e, 0x6e, 0x61, 0x62, 0x7e, 0x18, + 0x9b, 0x98, 0xcf, 0xae, 0x3f, 0xdb, 0xa9, 0x46, 0x67, 0x0d, 0x0d, 0x06, 0xb4, 0xcc, 0x85, 0x25, + 0x02, 0xae, 0x2e, 0xca, 0x02, 0x9f, 0xdf, 0xb4, 0x80, 0x84, 0x98, 0x1b, 0x49, 0x89, 0xe5, 0x78, + 0x4d, 0x12, 0x78, 0xe5, 0x2f, 0x05, 0xbd, 0x3d, 0x55, 0x77, 0x0b, 0xe3, 0xf9, 0x6c, 0x7c, 0x3c, + 0xef, 0xdf, 0xb0, 0xc3, 
0x19, 0x13, 0xfa, 0x5f, 0x7e, 0x46, 0x67, 0x37, 0x1e, 0xd2, 0xf7, 0xd1, + 0x8a, 0xc7, 0x5a, 0x52, 0x10, 0x0f, 0xe8, 0x1b, 0x89, 0x60, 0xa5, 0x1e, 0x87, 0x49, 0xba, 0x8f, + 0x8f, 0xd1, 0xb2, 0xc7, 0x5a, 0x4f, 0x8e, 0x0e, 0xe5, 0xed, 0x15, 0xcc, 0x8f, 0xd2, 0xe3, 0xaf, + 0xcb, 0xe8, 0xcb, 0x7e, 0xe9, 0xdd, 0x59, 0x5f, 0x48, 0x71, 0xe1, 0x01, 0xd7, 0x9f, 0x1c, 0x1d, + 0x92, 0x04, 0x81, 0xbf, 0x46, 0x98, 0x83, 0x7f, 0x46, 0x6d, 0x38, 0xb0, 0x6d, 0x16, 0xb8, 0x42, + 0x5a, 0x58, 0x92, 0xe0, 0xed, 0x04, 0x8c, 0x1b, 0x99, 0x0c, 0x32, 0x45, 0x85, 0x7b, 0x68, 0x6b, + 0x3c, 0x1a, 0x79, 0xcc, 0x4b, 0xd4, 0x17, 0x09, 0x6a, 0xab, 0x31, 0x99, 0x30, 0x9f, 0xdd, 0x2c, + 0x18, 0x7f, 0x83, 0x56, 0x5d, 0xd6, 0x02, 0xe9, 0x77, 0x59, 0x16, 0xf9, 0x34, 0x9d, 0x87, 0x5a, + 0x12, 0x7f, 0xd9, 0x2f, 0xbd, 0xf7, 0x6a, 0x76, 0x9a, 0x49, 0x06, 0x2c, 0x5c, 0x43, 0x2b, 0xd1, + 0xef, 0xc8, 0xfb, 0x8a, 0xc4, 0xee, 0xa7, 0x37, 0x51, 0x8b, 0xc3, 0xf3, 0x39, 0x4e, 0x21, 0xf8, + 0x21, 0xba, 0xe3, 0x58, 0xe7, 0x5f, 0x9e, 0x7b, 0xd4, 0xb7, 0x04, 0x65, 0x6e, 0x03, 0x6c, 0xe6, + 0xb6, 0xb8, 0xba, 0x5a, 0x56, 0x76, 0xf2, 0xa6, 0x1a, 0xf6, 0x4b, 0x77, 0x1e, 0x4d, 0xd9, 0x27, + 0x53, 0x55, 0xf8, 0x3e, 0x5a, 0xf7, 0x4e, 0xe9, 0x79, 0x3d, 0x68, 0xf6, 0xa8, 0x7d, 0x0c, 0x17, + 0x6a, 0xa1, 0xac, 0xec, 0xac, 0x99, 0x5b, 0x61, 0xbf, 0xb4, 0x5e, 0x3f, 0x3e, 0xfa, 0x6e, 0xb0, + 0x41, 0xc6, 0xf3, 0x70, 0x15, 0x6d, 0x79, 0x3e, 0x63, 0xed, 0xc7, 0xed, 0x3a, 0xe3, 0x1c, 0x38, + 0xa7, 0xcc, 0x55, 0x91, 0x14, 0xdf, 0x8d, 0x2e, 0xa6, 0x3e, 0xb9, 0x49, 0xb2, 0xf9, 0x95, 0xbf, + 0x17, 0xd1, 0x3b, 0xaf, 0xf8, 0x12, 0x60, 0x1b, 0xa1, 0xc8, 0x26, 0x8d, 0x1c, 0x73, 0x55, 0x91, + 0x4f, 0xcf, 0x98, 0xef, 0x55, 0x57, 0x53, 0xdd, 0xf0, 0xa9, 0x0c, 0x42, 0x9c, 0x8c, 0x60, 0xf1, + 0x21, 0xda, 0x1c, 0x79, 0xc1, 0xd5, 0xae, 0x45, 0xdd, 0xe4, 0xcd, 0xa8, 0x89, 0x72, 0xb3, 0x3a, + 0xb1, 0x4f, 0x32, 0x0a, 0xfc, 0x2d, 0x2a, 0xb8, 0x4c, 0x98, 0xd0, 0x66, 0x7e, 0x3c, 0xef, 0xc5, + 0xbd, 0x0f, 0xe6, 0x73, 0x7a, 0x42, 0x1d, 0x30, 0xd7, 0xc3, 0x7e, 0xa9, 0x50, 0x4b, 0x01, 0x64, + 0xc8, 0xc2, 0x6d, 0xb4, 0xd1, 0x84, 0x0e, 0x75, 0x09, 0xb4, 0x7d, 0xe0, 0xdd, 0x03, 0x21, 0x9f, + 0xc0, 0xeb, 0xd1, 0x71, 0xd8, 0x2f, 0x6d, 0x98, 0x63, 0x14, 0x32, 0x41, 0xc5, 0x27, 0xd1, 0xfc, + 0x8b, 0x83, 0xb6, 0x00, 0x5f, 0xce, 0xff, 0xeb, 0x55, 0x58, 0x8b, 0xdf, 0x49, 0xac, 0x27, 0x03, + 0x92, 0xf9, 0xd5, 0xe5, 0x95, 0x96, 0x7b, 0x7e, 0xa5, 0xe5, 0x5e, 0x5c, 0x69, 0xb9, 0x5f, 0x42, + 0x4d, 0xb9, 0x0c, 0x35, 0xe5, 0x79, 0xa8, 0x29, 0x2f, 0x42, 0x4d, 0xf9, 0x27, 0xd4, 0x94, 0xdf, + 0xfe, 0xd5, 0x72, 0x4f, 0xcb, 0xd7, 0xfd, 0xd9, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x6c, + 0x5a, 0xc4, 0x8f, 0x0a, 0x00, 0x00, } func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { @@ -292,6 +441,261 @@ func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *PodCertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, 
err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodCertificateRequestList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequestList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodCertificateRequestSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequestSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProofOfPossession != nil { + i -= len(m.ProofOfPossession) + copy(dAtA[i:], m.ProofOfPossession) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProofOfPossession))) + i-- + dAtA[i] = 0x52 + } + if m.PKIXPublicKey != nil { + i -= len(m.PKIXPublicKey) + copy(dAtA[i:], m.PKIXPublicKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PKIXPublicKey))) + i-- + dAtA[i] = 0x4a + } + if m.MaxExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds)) + i-- + dAtA[i] = 0x40 + } + i -= len(m.NodeUID) + copy(dAtA[i:], m.NodeUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeUID))) + i-- + dAtA[i] = 0x3a + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceAccountUID) + copy(dAtA[i:], m.ServiceAccountUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountUID))) + i-- + dAtA[i] = 0x2a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x22 + i -= len(m.PodUID) + copy(dAtA[i:], m.PodUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodUID))) + i-- + dAtA[i] = 0x1a + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodCertificateRequestStatus) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateRequestStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NotAfter != nil { + { + size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.BeginRefreshAt != nil { + { + size, err := m.BeginRefreshAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.NotBefore != nil { + { + size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.CertificateChain) + copy(dAtA[i:], m.CertificateChain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChain))) + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -346,25 +750,120 @@ func (m *ClusterTrustBundleSpec) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ClusterTrustBundle) String() string { - if this == nil { - return "nil" +func (m *PodCertificateRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ClusterTrustBundle{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterTrustBundleList) String() string { - if this == nil { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCertificateRequestList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodCertificateRequestSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServiceAccountUID) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeUID) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds)) + } + if m.PKIXPublicKey != nil { + l = len(m.PKIXPublicKey) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProofOfPossession != nil { + l = len(m.ProofOfPossession) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodCertificateRequestStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.CertificateChain) + n += 1 + l + sovGenerated(uint64(l)) + if m.NotBefore != nil { + l = m.NotBefore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BeginRefreshAt != nil { + l = m.BeginRefreshAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NotAfter != nil { + l = m.NotAfter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { return "nil" } repeatedStringForItems := "[]ClusterTrustBundle{" @@ -390,6 +889,72 @@ func (this *ClusterTrustBundleSpec) String() string { }, "") return s } +func (this *PodCertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCertificateRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodCertificateRequestSpec", "PodCertificateRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodCertificateRequestStatus", "PodCertificateRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodCertificateRequestList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodCertificateRequest{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodCertificateRequest", "PodCertificateRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodCertificateRequestList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodCertificateRequestSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCertificateRequestSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`, + `PodUID:` + fmt.Sprintf("%v", this.PodUID) + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) 
+ `,`, + `ServiceAccountUID:` + fmt.Sprintf("%v", this.ServiceAccountUID) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeUID:` + fmt.Sprintf("%v", this.NodeUID) + `,`, + `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`, + `PKIXPublicKey:` + valueToStringGenerated(this.PKIXPublicKey) + `,`, + `ProofOfPossession:` + valueToStringGenerated(this.ProofOfPossession) + `,`, + `}`, + }, "") + return s +} +func (this *PodCertificateRequestStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PodCertificateRequestStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `CertificateChain:` + fmt.Sprintf("%v", this.CertificateChain) + `,`, + `NotBefore:` + strings.Replace(fmt.Sprintf("%v", this.NotBefore), "Time", "v1.Time", 1) + `,`, + `BeginRefreshAt:` + strings.Replace(fmt.Sprintf("%v", this.BeginRefreshAt), "Time", "v1.Time", 1) + `,`, + `NotAfter:` + strings.Replace(fmt.Sprintf("%v", this.NotAfter), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -745,6 +1310,858 @@ func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodCertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateRequestList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequestList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequestList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodCertificateRequest{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ServiceAccountUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = k8s_io_apimachinery_pkg_types.NodeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxExpirationSeconds = &v + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PKIXPublicKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PKIXPublicKey = append(m.PKIXPublicKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PKIXPublicKey == nil { + m.PKIXPublicKey = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofOfPossession", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofOfPossession = append(m.ProofOfPossession[:0], dAtA[iNdEx:postIndex]...) + if m.ProofOfPossession == nil { + m.ProofOfPossession = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateRequestStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCertificateRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateChain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertificateChain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NotBefore == nil { + m.NotBefore = &v1.Time{} + } + if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginRefreshAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeginRefreshAt == nil { + m.BeginRefreshAt = &v1.Time{} + } + if err := m.BeginRefreshAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NotAfter == nil { + m.NotAfter = &v1.Time{} + } + if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.proto index 7155f778cff..194bdbc14ff 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.proto +++ b/e2e/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -101,3 +101,208 @@ message ClusterTrustBundleSpec { optional string trustBundle = 2; } +// PodCertificateRequest encodes a pod requesting a certificate from a given +// signer. +// +// Kubelets use this API to implement podCertificate projected volumes +message PodCertificateRequest { + // metadata contains the object metadata. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the details about the certificate being requested. + optional PodCertificateRequestSpec spec = 2; + + // status contains the issued certificate, and a standard set of conditions. 
+  // +optional
+  optional PodCertificateRequestStatus status = 3;
+}
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+message PodCertificateRequestList {
+  // metadata contains the list metadata.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a collection of PodCertificateRequest objects
+  repeated PodCertificateRequest items = 2;
+}
+
+// PodCertificateRequestSpec describes the certificate request. All fields are
+// immutable after creation.
+message PodCertificateRequestSpec {
+  // signerName indicates the requested signer.
+  //
+  // All signer names beginning with `kubernetes.io` are reserved for use by
+  // the Kubernetes project. There is currently one well-known signer
+  // documented by the Kubernetes project,
+  // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+  // certificates understood by kube-apiserver. It is currently
+  // unimplemented.
+  //
+  // +required
+  optional string signerName = 1;
+
+  // podName is the name of the pod into which the certificate will be mounted.
+  //
+  // +required
+  optional string podName = 2;
+
+  // podUID is the UID of the pod into which the certificate will be mounted.
+  //
+  // +required
+  optional string podUID = 3;
+
+  // serviceAccountName is the name of the service account the pod is running as.
+  //
+  // +required
+  optional string serviceAccountName = 4;
+
+  // serviceAccountUID is the UID of the service account the pod is running as.
+  //
+  // +required
+  optional string serviceAccountUID = 5;
+
+  // nodeName is the name of the node the pod is assigned to.
+  //
+  // +required
+  optional string nodeName = 6;
+
+  // nodeUID is the UID of the node the pod is assigned to.
+  //
+  // +required
+  optional string nodeUID = 7;
+
+  // maxExpirationSeconds is the maximum lifetime permitted for the
+  // certificate.
+  //
+  // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+  // will reject values shorter than 3600 (1 hour). The maximum allowable
+  // value is 7862400 (91 days).
+  //
+  // The signer implementation is then free to issue a certificate with any
+  // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+  // seconds (1 hour). This constraint is enforced by kube-apiserver.
+  // `kubernetes.io` signers will never issue certificates with a lifetime
+  // longer than 24 hours.
+  //
+  // +optional
+  // +default=86400
+  optional int32 maxExpirationSeconds = 8;
+
+  // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+  // certificate to.
+  //
+  // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+  // or ED25519. Note that this list may be expanded in the future.
+  //
+  // Signer implementations do not need to support all key types supported by
+  // kube-apiserver and kubelet. If a signer does not support the key type
+  // used for a given PodCertificateRequest, it must deny the request by
+  // setting a status.conditions entry with a type of "Denied" and a reason of
+  // "UnsupportedKeyType". It may also suggest a key type that it does support
+  // in the message field.
+  //
+  // +required
+  optional bytes pkixPublicKey = 9;
+
+  // proofOfPossession proves that the requesting kubelet holds the private
+  // key corresponding to pkixPublicKey.
+  //
+  // It is constructed by signing the ASCII bytes of the pod's UID using
+  // `pkixPublicKey`.
+  //
+  // kube-apiserver validates the proof of possession during creation of the
+  // PodCertificateRequest.
+  //
+  // If the key is an RSA key, then the signature is over the ASCII bytes of
+  // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+  // function crypto/rsa.SignPSS with nil options).
+  //
+  // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+  // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+  // golang library function crypto/ecdsa.SignASN1).
+  //
+  // If the key is an ED25519 key, then the signature is as described by the
+  // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+  // the golang library crypto/ed25519.Sign).
+  //
+  // +required
+  optional bytes proofOfPossession = 10;
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+message PodCertificateRequestStatus {
+  // conditions applied to the request.
+  //
+  // The types "Issued", "Denied", and "Failed" have special handling. At
+  // most one of these conditions may be present, and they must have status
+  // "True".
+  //
+  // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+  // suggest a key type that will work in the message field.
+  //
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  // +optional
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+
+  // certificateChain is populated with an issued certificate by the signer.
+  // This field is set via the /status subresource. Once populated, this field
+  // is immutable.
+  //
+  // If the certificate signing request is denied, a condition of type
+  // "Denied" is added and this field remains empty. If the signer cannot
+  // issue the certificate, a condition of type "Failed" is added and this
+  // field remains empty.
+  //
+  // Validation requirements:
+  //  1. certificateChain must consist of one or more PEM-formatted certificates.
+  //  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+  //     described in section 4 of RFC5280.
+  //
+  // If more than one block is present, and the definition of the requested
+  // spec.signerName does not indicate otherwise, the first block is the
+  // issued certificate, and subsequent blocks should be treated as
+  // intermediate certificates and presented in TLS handshakes. When
+  // projecting the chain into a pod volume, kubelet will drop any data
+  // in-between the PEM blocks, as well as any PEM block headers.
+  //
+  // +optional
+  optional string certificateChain = 2;
+
+  // notBefore is the time at which the certificate becomes valid. The value
+  // must be the same as the notBefore value in the leaf certificate in
+  // certificateChain. This field is set via the /status subresource. Once
+  // populated, it is immutable. The signer must set this field at the same
+  // time it sets certificateChain.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
+
+  // beginRefreshAt is the time at which the kubelet should begin trying to
+  // refresh the certificate. This field is set via the /status subresource,
+  // and must be set at the same time as certificateChain. Once populated,
+  // this field is immutable.
+  //
+  // This field is only a hint. Kubelet may start refreshing before or after
+  // this time if necessary.
+ // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5; + + // notAfter is the time at which the certificate expires. The value must be + // the same as the notAfter value in the leaf certificate in + // certificateChain. This field is set via the /status subresource. Once + // populated, it is immutable. The signer must set this field at the same + // time it sets certificateChain. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6; +} + diff --git a/e2e/vendor/k8s.io/api/certificates/v1alpha1/register.go b/e2e/vendor/k8s.io/api/certificates/v1alpha1/register.go index 7288ed9a3e8..ae541e15c12 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1alpha1/register.go +++ b/e2e/vendor/k8s.io/api/certificates/v1alpha1/register.go @@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ClusterTrustBundle{}, &ClusterTrustBundleList{}, + &PodCertificateRequest{}, + &PodCertificateRequestList{}, ) // Add the watch version that applies diff --git a/e2e/vendor/k8s.io/api/certificates/v1alpha1/types.go b/e2e/vendor/k8s.io/api/certificates/v1alpha1/types.go index beef02599d0..a5cb3809e7f 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1alpha1/types.go +++ b/e2e/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -18,6 +18,7 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // +genclient @@ -106,3 +107,233 @@ type ClusterTrustBundleList struct { // items is a collection of ClusterTrustBundle objects Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +k8s:prerelease-lifecycle-gen:introduced=1.34 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodCertificateRequest encodes a pod requesting a certificate from a given +// signer. +// +// Kubelets use this API to implement podCertificate projected volumes +type PodCertificateRequest struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the object metadata. + // + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the details about the certificate being requested. + Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // status contains the issued certificate, and a standard set of conditions. + // +optional + Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodCertificateRequestSpec describes the certificate request. All fields are +// immutable after creation. +type PodCertificateRequestSpec struct { + // signerName indicates the requested signer. + // + // All signer names beginning with `kubernetes.io` are reserved for use by + // the Kubernetes project. There is currently one well-known signer + // documented by the Kubernetes project, + // `kubernetes.io/kube-apiserver-client-pod`, which will issue client + // certificates understood by kube-apiserver. It is currently + // unimplemented. + // + // +required + SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"` + + // podName is the name of the pod into which the certificate will be mounted. + // + // +required + PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"` + // podUID is the UID of the pod into which the certificate will be mounted. 
+	//
+	// +required
+	PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
+
+	// serviceAccountName is the name of the service account the pod is running as.
+	//
+	// +required
+	ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
+	// serviceAccountUID is the UID of the service account the pod is running as.
+	//
+	// +required
+	ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
+
+	// nodeName is the name of the node the pod is assigned to.
+	//
+	// +required
+	NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
+	// nodeUID is the UID of the node the pod is assigned to.
+	//
+	// +required
+	NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
+
+	// maxExpirationSeconds is the maximum lifetime permitted for the
+	// certificate.
+	//
+	// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+	// will reject values shorter than 3600 (1 hour). The maximum allowable
+	// value is 7862400 (91 days).
+	//
+	// The signer implementation is then free to issue a certificate with any
+	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+	// seconds (1 hour). This constraint is enforced by kube-apiserver.
+	// `kubernetes.io` signers will never issue certificates with a lifetime
+	// longer than 24 hours.
+	//
+	// +optional
+	// +default=86400
+	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
+
+	// pkixPublicKey is the PKIX-serialized public key the signer will issue the
+	// certificate to.
+	//
+	// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+	// or ED25519. Note that this list may be expanded in the future.
+	//
+	// Signer implementations do not need to support all key types supported by
+	// kube-apiserver and kubelet. If a signer does not support the key type
+	// used for a given PodCertificateRequest, it must deny the request by
+	// setting a status.conditions entry with a type of "Denied" and a reason of
+	// "UnsupportedKeyType". It may also suggest a key type that it does support
+	// in the message field.
+	//
+	// +required
+	PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
+
+	// proofOfPossession proves that the requesting kubelet holds the private
+	// key corresponding to pkixPublicKey.
+	//
+	// It is constructed by signing the ASCII bytes of the pod's UID using
+	// `pkixPublicKey`.
+	//
+	// kube-apiserver validates the proof of possession during creation of the
+	// PodCertificateRequest.
+	//
+	// If the key is an RSA key, then the signature is over the ASCII bytes of
+	// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+	// function crypto/rsa.SignPSS with nil options).
+	//
+	// If the key is an ECDSA key, then the signature is as described by [SEC 1,
+	// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+	// golang library function crypto/ecdsa.SignASN1).
+	//
+	// If the key is an ED25519 key, then the signature is as described by the
+	// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+	// the golang library crypto/ed25519.Sign).
+ // + // +required + ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"` +} + +// PodCertificateRequestStatus describes the status of the request, and holds +// the certificate data if the request is issued. +type PodCertificateRequestStatus struct { + // conditions applied to the request. + // + // The types "Issued", "Denied", and "Failed" have special handling. At + // most one of these conditions may be present, and they must have status + // "True". + // + // If the request is denied with `Reason=UnsupportedKeyType`, the signer may + // suggest a key type that will work in the message field. + // + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // certificateChain is populated with an issued certificate by the signer. + // This field is set via the /status subresource. Once populated, this field + // is immutable. + // + // If the certificate signing request is denied, a condition of type + // "Denied" is added and this field remains empty. If the signer cannot + // issue the certificate, a condition of type "Failed" is added and this + // field remains empty. + // + // Validation requirements: + // 1. certificateChain must consist of one or more PEM-formatted certificates. + // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as + // described in section 4 of RFC5280. + // + // If more than one block is present, and the definition of the requested + // spec.signerName does not indicate otherwise, the first block is the + // issued certificate, and subsequent blocks should be treated as + // intermediate certificates and presented in TLS handshakes. When + // projecting the chain into a pod volume, kubelet will drop any data + // in-between the PEM blocks, as well as any PEM block headers. + // + // +optional + CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"` + + // notBefore is the time at which the certificate becomes valid. The value + // must be the same as the notBefore value in the leaf certificate in + // certificateChain. This field is set via the /status subresource. Once + // populated, it is immutable. The signer must set this field at the same + // time it sets certificateChain. + // + // +optional + NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"` + + // beginRefreshAt is the time at which the kubelet should begin trying to + // refresh the certificate. This field is set via the /status subresource, + // and must be set at the same time as certificateChain. Once populated, + // this field is immutable. + // + // This field is only a hint. Kubelet may start refreshing before or after + // this time if necessary. + // + // +optional + BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"` + + // notAfter is the time at which the certificate expires. The value must be + // the same as the notAfter value in the leaf certificate in + // certificateChain. This field is set via the /status subresource. Once + // populated, it is immutable. The signer must set this field at the same + // time it sets certificateChain. 
+ // + // +optional + NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"` +} + +// Well-known condition types for PodCertificateRequests +const ( + // Denied indicates the request was denied by the signer. + PodCertificateRequestConditionTypeDenied string = "Denied" + // Failed indicates the signer failed to issue the certificate. + PodCertificateRequestConditionTypeFailed string = "Failed" + // Issued indicates the certificate has been issued. + PodCertificateRequestConditionTypeIssued string = "Issued" +) + +// Well-known condition reasons for PodCertificateRequests +const ( + // UnsupportedKeyType should be set on "Denied" conditions when the signer + // doesn't support the key type of publicKey. + PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.34 + +// PodCertificateRequestList is a collection of PodCertificateRequest objects +type PodCertificateRequestList struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the list metadata. + // + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a collection of PodCertificateRequest objects + Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/e2e/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go index bff649e3cbd..d29f2d8505f 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go @@ -57,4 +57,56 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string { return map_ClusterTrustBundleSpec } +var map_PodCertificateRequest = map[string]string{ + "": "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes", + "metadata": "metadata contains the object metadata.", + "spec": "spec contains the details about the certificate being requested.", + "status": "status contains the issued certificate, and a standard set of conditions.", +} + +func (PodCertificateRequest) SwaggerDoc() map[string]string { + return map_PodCertificateRequest +} + +var map_PodCertificateRequestList = map[string]string{ + "": "PodCertificateRequestList is a collection of PodCertificateRequest objects", + "metadata": "metadata contains the list metadata.", + "items": "items is a collection of PodCertificateRequest objects", +} + +func (PodCertificateRequestList) SwaggerDoc() map[string]string { + return map_PodCertificateRequestList +} + +var map_PodCertificateRequestSpec = map[string]string{ + "": "PodCertificateRequestSpec describes the certificate request. All fields are immutable after creation.", + "signerName": "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. 
It is currently unimplemented.",
+	"podName": "podName is the name of the pod into which the certificate will be mounted.",
+	"podUID": "podUID is the UID of the pod into which the certificate will be mounted.",
+	"serviceAccountName": "serviceAccountName is the name of the service account the pod is running as.",
+	"serviceAccountUID": "serviceAccountUID is the UID of the service account the pod is running as.",
+	"nodeName": "nodeName is the name of the node the pod is assigned to.",
+	"nodeUID": "nodeUID is the UID of the node the pod is assigned to.",
+	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+	"pkixPublicKey": "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
+	"proofOfPossession": "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is constructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1).\n\nIf the key is an ED25519 key, then the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
+}
+
+func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
+	return map_PodCertificateRequestSpec
+}
+
+var map_PodCertificateRequestStatus = map[string]string{
+	"": "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
+	"conditions": "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling. At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
+	"certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource.
Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.", + "notBefore": "notBefore is the time at which the certificate becomes valid. The value must be the same as the notBefore value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.", + "beginRefreshAt": "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate. This field is set via the /status subresource, and must be set at the same time as certificateChain. Once populated, this field is immutable.\n\nThis field is only a hint. Kubelet may start refreshing before or after this time if necessary.", + "notAfter": "notAfter is the time at which the certificate expires. The value must be the same as the notAfter value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.", +} + +func (PodCertificateRequestStatus) SwaggerDoc() map[string]string { + return map_PodCertificateRequestStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go index 30a4dc1e80d..25bc0ed6cbc 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go +++ b/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -100,3 +101,130 @@ func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest. +func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest { + if in == nil { + return nil + } + out := new(PodCertificateRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodCertificateRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodCertificateRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList. +func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList { + if in == nil { + return nil + } + out := new(PodCertificateRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) { + *out = *in + if in.MaxExpirationSeconds != nil { + in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds + *out = new(int32) + **out = **in + } + if in.PKIXPublicKey != nil { + in, out := &in.PKIXPublicKey, &out.PKIXPublicKey + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ProofOfPossession != nil { + in, out := &in.ProofOfPossession, &out.ProofOfPossession + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec. +func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec { + if in == nil { + return nil + } + out := new(PodCertificateRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotBefore != nil { + in, out := &in.NotBefore, &out.NotBefore + *out = (*in).DeepCopy() + } + if in.BeginRefreshAt != nil { + in, out := &in.BeginRefreshAt, &out.BeginRefreshAt + *out = (*in).DeepCopy() + } + if in.NotAfter != nil { + in, out := &in.NotAfter, &out.NotAfter + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus. 
+func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus { + if in == nil { + return nil + } + out := new(PodCertificateRequestStatus) + in.DeepCopyInto(out) + return out +} diff --git a/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go index 3121a87d082..edbfce79bc7 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/e2e/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -56,3 +56,39 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { return 1, 37 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) { + return 1, 34 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) { + return 1, 37 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) { + return 1, 40 +} diff --git a/e2e/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/e2e/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 7c48270f65c..4c9385c1960 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/e2e/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -30,6 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "k8s.io/api/certificates/v1beta1"; // Describes a certificate signing request +// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval message CertificateSigningRequest { // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -182,6 +184,11 @@ message CertificateSigningRequestStatus { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember repeated CertificateSigningRequestCondition conditions = 1; // If request was approved, the controller will place the issued certificate here. diff --git a/e2e/vendor/k8s.io/api/certificates/v1beta1/types.go b/e2e/vendor/k8s.io/api/certificates/v1beta1/types.go index 1ce104807dd..fadb7e082ef 100644 --- a/e2e/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/e2e/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -31,6 +31,8 @@ import ( // +k8s:prerelease-lifecycle-gen:replacement=certificates.k8s.io,v1,CertificateSigningRequest // Describes a certificate signing request +// +k8s:supportsSubresource=/status +// +k8s:supportsSubresource=/approval type CertificateSigningRequest struct { metav1.TypeMeta `json:",inline"` // +optional @@ -175,6 +177,11 @@ type CertificateSigningRequestStatus struct { // +listType=map // +listMapKey=type // +optional + // +k8s:listType=map + // +k8s:listMapKey=type + // +k8s:optional + // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember + // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` // If request was approved, the controller will place the issued certificate here. 
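The proofOfPossession contract documented in the PodCertificateRequest comments above is compact enough to sketch. Below is a minimal, self-contained Go illustration of the Ed25519 variant, assuming a hypothetical pod UID value: the kubelet signs the ASCII bytes of the pod UID with the private key whose PKIX-serialized public half is placed in pkixPublicKey, and kube-apiserver re-derives the public key from that field to verify the signature at creation time. Only the signing rule comes from the field documentation; the program structure is illustrative.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

func main() {
	// Kubelet side: generate a key pair and serialize the public half as PKIX DER.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		panic(err)
	}

	// Per the docs above, the proof is a plain Ed25519 signature over the
	// ASCII bytes of the pod UID (hypothetical value here).
	podUID := []byte("0f4dcd62-3f4a-4ba8-95a9-f53cf5f3e5f1")
	proofOfPossession := ed25519.Sign(priv, podUID)

	// kube-apiserver side: parse pkixPublicKey back into a key and verify
	// the signature against the same pod UID bytes.
	parsed, err := x509.ParsePKIXPublicKey(pkixPublicKey)
	if err != nil {
		panic(err)
	}
	edPub, ok := parsed.(ed25519.PublicKey)
	if !ok {
		panic("pkixPublicKey is not an Ed25519 key")
	}
	fmt.Println("proof of possession valid:", ed25519.Verify(edPub, podUID, proofOfPossession))
}

The RSA and ECDSA variants follow the same shape with crypto/rsa.SignPSS and crypto/ecdsa.SignASN1, as the field documentation notes.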
diff --git a/e2e/vendor/k8s.io/api/core/v1/generated.pb.go b/e2e/vendor/k8s.io/api/core/v1/generated.pb.go index a4b8f584295..e1a297b9854 100644 --- a/e2e/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/e2e/vendor/k8s.io/api/core/v1/generated.pb.go @@ -861,10 +861,38 @@ func (m *Container) XXX_DiscardUnknown() { var xxx_messageInfo_Container proto.InternalMessageInfo +func (m *ContainerExtendedResourceRequest) Reset() { *m = ContainerExtendedResourceRequest{} } +func (*ContainerExtendedResourceRequest) ProtoMessage() {} +func (*ContainerExtendedResourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{29} +} +func (m *ContainerExtendedResourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerExtendedResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerExtendedResourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerExtendedResourceRequest.Merge(m, src) +} +func (m *ContainerExtendedResourceRequest) XXX_Size() int { + return m.Size() +} +func (m *ContainerExtendedResourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerExtendedResourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerExtendedResourceRequest proto.InternalMessageInfo + func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{29} + return fileDescriptor_6c07b07c062484ab, []int{30} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +920,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{30} + return fileDescriptor_6c07b07c062484ab, []int{31} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{31} + return fileDescriptor_6c07b07c062484ab, []int{32} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -945,10 +973,66 @@ func (m *ContainerResizePolicy) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo +func (m *ContainerRestartRule) Reset() { *m = ContainerRestartRule{} } +func (*ContainerRestartRule) ProtoMessage() {} +func (*ContainerRestartRule) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{33} +} +func (m *ContainerRestartRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerRestartRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerRestartRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerRestartRule.Merge(m, src) +} +func (m *ContainerRestartRule) XXX_Size() int { + return m.Size() +} +func (m *ContainerRestartRule) 
XXX_DiscardUnknown() { + xxx_messageInfo_ContainerRestartRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerRestartRule proto.InternalMessageInfo + +func (m *ContainerRestartRuleOnExitCodes) Reset() { *m = ContainerRestartRuleOnExitCodes{} } +func (*ContainerRestartRuleOnExitCodes) ProtoMessage() {} +func (*ContainerRestartRuleOnExitCodes) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{34} +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerRestartRuleOnExitCodes.Merge(m, src) +} +func (m *ContainerRestartRuleOnExitCodes) XXX_Size() int { + return m.Size() +} +func (m *ContainerRestartRuleOnExitCodes) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerRestartRuleOnExitCodes.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerRestartRuleOnExitCodes proto.InternalMessageInfo + func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{32} + return fileDescriptor_6c07b07c062484ab, []int{35} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1060,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{33} + return fileDescriptor_6c07b07c062484ab, []int{36} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1088,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{34} + return fileDescriptor_6c07b07c062484ab, []int{37} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1116,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{35} + return fileDescriptor_6c07b07c062484ab, []int{38} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1144,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{36} + return fileDescriptor_6c07b07c062484ab, []int{39} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1172,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *ContainerUser) Reset() { *m = ContainerUser{} } func (*ContainerUser) ProtoMessage() 
{} func (*ContainerUser) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{37} + return fileDescriptor_6c07b07c062484ab, []int{40} } func (m *ContainerUser) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1200,7 @@ var xxx_messageInfo_ContainerUser proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{38} + return fileDescriptor_6c07b07c062484ab, []int{41} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1228,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{39} + return fileDescriptor_6c07b07c062484ab, []int{42} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1256,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{40} + return fileDescriptor_6c07b07c062484ab, []int{43} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1284,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{41} + return fileDescriptor_6c07b07c062484ab, []int{44} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1312,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{42} + return fileDescriptor_6c07b07c062484ab, []int{45} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1340,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{43} + return fileDescriptor_6c07b07c062484ab, []int{46} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1368,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{44} + return fileDescriptor_6c07b07c062484ab, []int{47} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1396,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func 
(*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{45} + return fileDescriptor_6c07b07c062484ab, []int{48} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1424,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{46} + return fileDescriptor_6c07b07c062484ab, []int{49} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1452,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{47} + return fileDescriptor_6c07b07c062484ab, []int{50} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1480,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{48} + return fileDescriptor_6c07b07c062484ab, []int{51} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1508,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{49} + return fileDescriptor_6c07b07c062484ab, []int{52} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1536,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{50} + return fileDescriptor_6c07b07c062484ab, []int{53} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1564,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{51} + return fileDescriptor_6c07b07c062484ab, []int{54} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1592,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{52} + return fileDescriptor_6c07b07c062484ab, []int{55} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1620,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{53} + return fileDescriptor_6c07b07c062484ab, []int{56} } func (m 
*EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1648,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{54} + return fileDescriptor_6c07b07c062484ab, []int{57} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1676,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{55} + return fileDescriptor_6c07b07c062484ab, []int{58} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1704,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{56} + return fileDescriptor_6c07b07c062484ab, []int{59} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1732,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{57} + return fileDescriptor_6c07b07c062484ab, []int{60} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1760,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{58} + return fileDescriptor_6c07b07c062484ab, []int{61} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1788,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{59} + return fileDescriptor_6c07b07c062484ab, []int{62} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1729,10 +1813,38 @@ func (m *FCVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo +func (m *FileKeySelector) Reset() { *m = FileKeySelector{} } +func (*FileKeySelector) ProtoMessage() {} +func (*FileKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{63} +} +func (m *FileKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileKeySelector.Merge(m, src) +} +func (m *FileKeySelector) XXX_Size() int { + return m.Size() +} +func (m *FileKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_FileKeySelector.DiscardUnknown(m) +} + +var xxx_messageInfo_FileKeySelector proto.InternalMessageInfo + func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func 
(*FlexPersistentVolumeSource) ProtoMessage() {}
 func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{60}
+	return fileDescriptor_6c07b07c062484ab, []int{64}
 }
 func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1760,7 +1872,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo
 func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} }
 func (*FlexVolumeSource) ProtoMessage() {}
 func (*FlexVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{61}
+	return fileDescriptor_6c07b07c062484ab, []int{65}
 }
 func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1788,7 +1900,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo
 func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} }
 func (*FlockerVolumeSource) ProtoMessage() {}
 func (*FlockerVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{62}
+	return fileDescriptor_6c07b07c062484ab, []int{66}
 }
 func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1816,7 +1928,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo
 func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} }
 func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
 func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{63}
+	return fileDescriptor_6c07b07c062484ab, []int{67}
 }
 func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1844,7 +1956,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo
 func (m *GRPCAction) Reset() { *m = GRPCAction{} }
 func (*GRPCAction) ProtoMessage() {}
 func (*GRPCAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{64}
+	return fileDescriptor_6c07b07c062484ab, []int{68}
 }
 func (m *GRPCAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1872,7 +1984,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo
 func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} }
 func (*GitRepoVolumeSource) ProtoMessage() {}
 func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{65}
+	return fileDescriptor_6c07b07c062484ab, []int{69}
 }
 func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1900,7 +2012,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo
 func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} }
 func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
 func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{66}
+	return fileDescriptor_6c07b07c062484ab, []int{70}
 }
 func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1928,7 +2040,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo
 func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} }
 func (*GlusterfsVolumeSource) ProtoMessage() {}
 func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{67}
+	return fileDescriptor_6c07b07c062484ab, []int{71}
 }
 func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1956,7 +2068,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo
 func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} }
 func (*HTTPGetAction) ProtoMessage() {}
 func (*HTTPGetAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{68}
+	return fileDescriptor_6c07b07c062484ab, []int{72}
 }
 func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1984,7 +2096,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo
 func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
 func (*HTTPHeader) ProtoMessage() {}
 func (*HTTPHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{69}
+	return fileDescriptor_6c07b07c062484ab, []int{73}
 }
 func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2012,7 +2124,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
 func (m *HostAlias) Reset() { *m = HostAlias{} }
 func (*HostAlias) ProtoMessage() {}
 func (*HostAlias) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{70}
+	return fileDescriptor_6c07b07c062484ab, []int{74}
 }
 func (m *HostAlias) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2040,7 +2152,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo
 func (m *HostIP) Reset() { *m = HostIP{} }
 func (*HostIP) ProtoMessage() {}
 func (*HostIP) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{71}
+	return fileDescriptor_6c07b07c062484ab, []int{75}
 }
 func (m *HostIP) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2068,7 +2180,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo
 func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
 func (*HostPathVolumeSource) ProtoMessage() {}
 func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{72}
+	return fileDescriptor_6c07b07c062484ab, []int{76}
 }
 func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2096,7 +2208,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
 func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} }
 func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
 func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{73}
+	return fileDescriptor_6c07b07c062484ab, []int{77}
 }
 func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2124,7 +2236,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
 func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
 func (*ISCSIVolumeSource) ProtoMessage() {}
 func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{74}
+	return fileDescriptor_6c07b07c062484ab, []int{78}
 }
 func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2152,7 +2264,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
 func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} }
 func (*ImageVolumeSource) ProtoMessage() {}
 func (*ImageVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{75}
+	return fileDescriptor_6c07b07c062484ab, []int{79}
 }
 func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2180,7 +2292,7 @@ var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo
 func (m *KeyToPath) Reset() { *m = KeyToPath{} }
 func (*KeyToPath) ProtoMessage() {}
 func (*KeyToPath) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{76}
+	return fileDescriptor_6c07b07c062484ab, []int{80}
 }
 func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2208,7 +2320,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
 func (m *Lifecycle) Reset() { *m = Lifecycle{} }
 func (*Lifecycle) ProtoMessage() {}
 func (*Lifecycle) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{77}
+	return fileDescriptor_6c07b07c062484ab, []int{81}
 }
 func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2236,7 +2348,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
 func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} }
 func (*LifecycleHandler) ProtoMessage() {}
 func (*LifecycleHandler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{78}
+	return fileDescriptor_6c07b07c062484ab, []int{82}
 }
 func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2264,7 +2376,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
 func (m *LimitRange) Reset() { *m = LimitRange{} }
 func (*LimitRange) ProtoMessage() {}
 func (*LimitRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{79}
+	return fileDescriptor_6c07b07c062484ab, []int{83}
 }
 func (m *LimitRange) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2292,7 +2404,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo
 func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
 func (*LimitRangeItem) ProtoMessage() {}
 func (*LimitRangeItem) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{80}
+	return fileDescriptor_6c07b07c062484ab, []int{84}
 }
 func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2320,7 +2432,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
 func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
 func (*LimitRangeList) ProtoMessage() {}
 func (*LimitRangeList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{81}
+	return fileDescriptor_6c07b07c062484ab, []int{85}
 }
 func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2348,7 +2460,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
 func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
 func (*LimitRangeSpec) ProtoMessage() {}
 func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{82}
+	return fileDescriptor_6c07b07c062484ab, []int{86}
 }
 func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2376,7 +2488,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
 func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} }
 func (*LinuxContainerUser) ProtoMessage() {}
 func (*LinuxContainerUser) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{83}
+	return fileDescriptor_6c07b07c062484ab, []int{87}
 }
 func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2404,7 +2516,7 @@ var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo
 func (m *List) Reset() { *m = List{} }
 func (*List) ProtoMessage() {}
 func (*List) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{84}
+	return fileDescriptor_6c07b07c062484ab, []int{88}
 }
 func (m *List) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2432,7 +2544,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
 func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
 func (*LoadBalancerIngress) ProtoMessage() {}
 func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{85}
+	return fileDescriptor_6c07b07c062484ab, []int{89}
 }
 func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2460,7 +2572,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
 func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
 func (*LoadBalancerStatus) ProtoMessage() {}
 func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{86}
+	return fileDescriptor_6c07b07c062484ab, []int{90}
 }
 func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2488,7 +2600,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
 func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
 func (*LocalObjectReference) ProtoMessage() {}
 func (*LocalObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{87}
+	return fileDescriptor_6c07b07c062484ab, []int{91}
 }
 func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2516,7 +2628,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
 func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
 func (*LocalVolumeSource) ProtoMessage() {}
 func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{88}
+	return fileDescriptor_6c07b07c062484ab, []int{92}
 }
 func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2544,7 +2656,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
 func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} }
 func (*ModifyVolumeStatus) ProtoMessage() {}
 func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{89}
+	return fileDescriptor_6c07b07c062484ab, []int{93}
 }
 func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2572,7 +2684,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
 func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
 func (*NFSVolumeSource) ProtoMessage() {}
 func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{90}
+	return fileDescriptor_6c07b07c062484ab, []int{94}
 }
 func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2600,7 +2712,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
 func (m *Namespace) Reset() { *m = Namespace{} }
 func (*Namespace) ProtoMessage() {}
 func (*Namespace) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{91}
+	return fileDescriptor_6c07b07c062484ab, []int{95}
 }
 func (m *Namespace) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2628,7 +2740,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
 func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} }
 func (*NamespaceCondition) ProtoMessage() {}
 func (*NamespaceCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{92}
+	return fileDescriptor_6c07b07c062484ab, []int{96}
 }
 func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2656,7 +2768,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
 func (m *NamespaceList) Reset() { *m = NamespaceList{} }
 func (*NamespaceList) ProtoMessage() {}
 func (*NamespaceList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{93}
+	return fileDescriptor_6c07b07c062484ab, []int{97}
 }
 func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2684,7 +2796,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
 func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
 func (*NamespaceSpec) ProtoMessage() {}
 func (*NamespaceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{94}
+	return fileDescriptor_6c07b07c062484ab, []int{98}
 }
 func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2712,7 +2824,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
 func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
 func (*NamespaceStatus) ProtoMessage() {}
 func (*NamespaceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{95}
+	return fileDescriptor_6c07b07c062484ab, []int{99}
 }
 func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2740,7 +2852,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
 func (m *Node) Reset() { *m = Node{} }
 func (*Node) ProtoMessage() {}
 func (*Node) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{96}
+	return fileDescriptor_6c07b07c062484ab, []int{100}
 }
 func (m *Node) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2768,7 +2880,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo
 func (m *NodeAddress) Reset() { *m = NodeAddress{} }
 func (*NodeAddress) ProtoMessage() {}
 func (*NodeAddress) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{97}
+	return fileDescriptor_6c07b07c062484ab, []int{101}
 }
 func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2796,7 +2908,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
 func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
 func (*NodeAffinity) ProtoMessage() {}
 func (*NodeAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{98}
+	return fileDescriptor_6c07b07c062484ab, []int{102}
 }
 func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2824,7 +2936,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
 func (m *NodeCondition) Reset() { *m = NodeCondition{} }
 func (*NodeCondition) ProtoMessage() {}
 func (*NodeCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{99}
+	return fileDescriptor_6c07b07c062484ab, []int{103}
 }
 func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2852,7 +2964,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
 func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} }
 func (*NodeConfigSource) ProtoMessage() {}
 func (*NodeConfigSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{100}
+	return fileDescriptor_6c07b07c062484ab, []int{104}
 }
 func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2880,7 +2992,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
 func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} }
 func (*NodeConfigStatus) ProtoMessage() {}
 func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{101}
+	return fileDescriptor_6c07b07c062484ab, []int{105}
 }
 func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2908,7 +3020,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
 func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
 func (*NodeDaemonEndpoints) ProtoMessage() {}
 func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{102}
+	return fileDescriptor_6c07b07c062484ab, []int{106}
 }
 func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2936,7 +3048,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
 func (m *NodeFeatures) Reset() { *m = NodeFeatures{} }
 func (*NodeFeatures) ProtoMessage() {}
 func (*NodeFeatures) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{103}
+	return fileDescriptor_6c07b07c062484ab, []int{107}
 }
 func (m *NodeFeatures) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2964,7 +3076,7 @@ var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo
 func (m *NodeList) Reset() { *m = NodeList{} }
 func (*NodeList) ProtoMessage() {}
 func (*NodeList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{104}
+	return fileDescriptor_6c07b07c062484ab, []int{108}
 }
 func (m *NodeList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2992,7 +3104,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo
 func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
 func (*NodeProxyOptions) ProtoMessage() {}
 func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{105}
+	return fileDescriptor_6c07b07c062484ab, []int{109}
 }
 func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3020,7 +3132,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
 func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} }
 func (*NodeRuntimeHandler) ProtoMessage() {}
 func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{106}
+	return fileDescriptor_6c07b07c062484ab, []int{110}
 }
 func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3048,7 +3160,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo
 func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} }
 func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
 func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{107}
+	return fileDescriptor_6c07b07c062484ab, []int{111}
 }
 func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3076,7 +3188,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo
 func (m *NodeSelector) Reset() { *m = NodeSelector{} }
 func (*NodeSelector) ProtoMessage() {}
 func (*NodeSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{108}
+	return fileDescriptor_6c07b07c062484ab, []int{112}
 }
 func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3104,7 +3216,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
 func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
 func (*NodeSelectorRequirement) ProtoMessage() {}
 func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{109}
+	return fileDescriptor_6c07b07c062484ab, []int{113}
 }
 func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3132,7 +3244,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
 func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
 func (*NodeSelectorTerm) ProtoMessage() {}
 func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{110}
+	return fileDescriptor_6c07b07c062484ab, []int{114}
 }
 func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3160,7 +3272,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
 func (m *NodeSpec) Reset() { *m = NodeSpec{} }
 func (*NodeSpec) ProtoMessage() {}
 func (*NodeSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{111}
+	return fileDescriptor_6c07b07c062484ab, []int{115}
 }
 func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3188,7 +3300,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
 func (m *NodeStatus) Reset() { *m = NodeStatus{} }
 func (*NodeStatus) ProtoMessage() {}
 func (*NodeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{112}
+	return fileDescriptor_6c07b07c062484ab, []int{116}
 }
 func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3216,7 +3328,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
 func (m *NodeSwapStatus) Reset() { *m = NodeSwapStatus{} }
 func (*NodeSwapStatus) ProtoMessage() {}
 func (*NodeSwapStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{113}
+	return fileDescriptor_6c07b07c062484ab, []int{117}
 }
 func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3244,7 +3356,7 @@ var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo
 func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
 func (*NodeSystemInfo) ProtoMessage() {}
 func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{114}
+	return fileDescriptor_6c07b07c062484ab, []int{118}
 }
 func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3272,7 +3384,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
 func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
 func (*ObjectFieldSelector) ProtoMessage() {}
 func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{115}
+	return fileDescriptor_6c07b07c062484ab, []int{119}
 }
 func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3300,7 +3412,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
 func (m *ObjectReference) Reset() { *m = ObjectReference{} }
 func (*ObjectReference) ProtoMessage() {}
 func (*ObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{116}
+	return fileDescriptor_6c07b07c062484ab, []int{120}
 }
 func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3328,7 +3440,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
 func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
 func (*PersistentVolume) ProtoMessage() {}
 func (*PersistentVolume) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{117}
+	return fileDescriptor_6c07b07c062484ab, []int{121}
 }
 func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3356,7 +3468,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
 func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
 func (*PersistentVolumeClaim) ProtoMessage() {}
 func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{118}
+	return fileDescriptor_6c07b07c062484ab, []int{122}
 }
 func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3384,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
 func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
 func (*PersistentVolumeClaimCondition) ProtoMessage() {}
 func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{119}
+	return fileDescriptor_6c07b07c062484ab, []int{123}
 }
 func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3412,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
 func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
 func (*PersistentVolumeClaimList) ProtoMessage() {}
 func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{120}
+	return fileDescriptor_6c07b07c062484ab, []int{124}
 }
 func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3440,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
 func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
 func (*PersistentVolumeClaimSpec) ProtoMessage() {}
 func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{121}
+	return fileDescriptor_6c07b07c062484ab, []int{125}
 }
 func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3468,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
 func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
 func (*PersistentVolumeClaimStatus) ProtoMessage() {}
 func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{122}
+	return fileDescriptor_6c07b07c062484ab, []int{126}
 }
 func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3496,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
 func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} }
 func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
 func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{123}
+	return fileDescriptor_6c07b07c062484ab, []int{127}
 }
 func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3524,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
 func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
 func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
 func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{124}
+	return fileDescriptor_6c07b07c062484ab, []int{128}
 }
 func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3552,7 +3664,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
 func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
 func (*PersistentVolumeList) ProtoMessage() {}
 func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{125}
+	return fileDescriptor_6c07b07c062484ab, []int{129}
 }
 func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3580,7 +3692,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
 func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
 func (*PersistentVolumeSource) ProtoMessage() {}
 func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{126}
+	return fileDescriptor_6c07b07c062484ab, []int{130}
 }
 func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3608,7 +3720,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
 func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
 func (*PersistentVolumeSpec) ProtoMessage() {}
 func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{127}
+	return fileDescriptor_6c07b07c062484ab, []int{131}
 }
 func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3636,7 +3748,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
 func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
 func (*PersistentVolumeStatus) ProtoMessage() {}
 func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{128}
+	return fileDescriptor_6c07b07c062484ab, []int{132}
 }
 func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3664,7 +3776,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
 func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
 func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
 func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{129}
+	return fileDescriptor_6c07b07c062484ab, []int{133}
 }
 func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3692,7 +3804,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
 func (m *Pod) Reset() { *m = Pod{} }
 func (*Pod) ProtoMessage() {}
 func (*Pod) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{130}
+	return fileDescriptor_6c07b07c062484ab, []int{134}
 }
 func (m *Pod) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3720,7 +3832,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
 func (m *PodAffinity) Reset() { *m = PodAffinity{} }
 func (*PodAffinity) ProtoMessage() {}
 func (*PodAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{131}
+	return fileDescriptor_6c07b07c062484ab, []int{135}
 }
 func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3748,7 +3860,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
 func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
 func (*PodAffinityTerm) ProtoMessage() {}
 func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{132}
+	return fileDescriptor_6c07b07c062484ab, []int{136}
 }
 func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3776,7 +3888,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
 func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
 func (*PodAntiAffinity) ProtoMessage() {}
 func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{133}
+	return fileDescriptor_6c07b07c062484ab, []int{137}
 }
 func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3804,7 +3916,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
 func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
 func (*PodAttachOptions) ProtoMessage() {}
 func (*PodAttachOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{134}
+	return fileDescriptor_6c07b07c062484ab, []int{138}
 }
 func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3829,10 +3941,38 @@ func (m *PodAttachOptions) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
 
+func (m *PodCertificateProjection) Reset() { *m = PodCertificateProjection{} }
+func (*PodCertificateProjection) ProtoMessage() {}
+func (*PodCertificateProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{139}
+}
+func (m *PodCertificateProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodCertificateProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodCertificateProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodCertificateProjection.Merge(m, src)
+}
+func (m *PodCertificateProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodCertificateProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodCertificateProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateProjection proto.InternalMessageInfo
+
 func (m *PodCondition) Reset() { *m = PodCondition{} }
 func (*PodCondition) ProtoMessage() {}
 func (*PodCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{135}
+	return fileDescriptor_6c07b07c062484ab, []int{140}
 }
 func (m *PodCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3860,7 +4000,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
 func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
 func (*PodDNSConfig) ProtoMessage() {}
 func (*PodDNSConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{136}
+	return fileDescriptor_6c07b07c062484ab, []int{141}
 }
 func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3888,7 +4028,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
 func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
 func (*PodDNSConfigOption) ProtoMessage() {}
 func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{137}
+	return fileDescriptor_6c07b07c062484ab, []int{142}
 }
 func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3916,7 +4056,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
 func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
 func (*PodExecOptions) ProtoMessage() {}
 func (*PodExecOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{138}
+	return fileDescriptor_6c07b07c062484ab, []int{143}
 }
 func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3941,10 +4081,38 @@ func (m *PodExecOptions) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
 
+func (m *PodExtendedResourceClaimStatus) Reset() { *m = PodExtendedResourceClaimStatus{} }
+func (*PodExtendedResourceClaimStatus) ProtoMessage() {}
+func (*PodExtendedResourceClaimStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{144}
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodExtendedResourceClaimStatus.Merge(m, src)
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodExtendedResourceClaimStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodExtendedResourceClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodExtendedResourceClaimStatus proto.InternalMessageInfo
+
 func (m *PodIP) Reset() { *m = PodIP{} }
 func (*PodIP) ProtoMessage() {}
 func (*PodIP) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{139}
+	return fileDescriptor_6c07b07c062484ab, []int{145}
 }
 func (m *PodIP) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3972,7 +4140,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
 func (m *PodList) Reset() { *m = PodList{} }
 func (*PodList) ProtoMessage() {}
 func (*PodList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{140}
+	return fileDescriptor_6c07b07c062484ab, []int{146}
 }
 func (m *PodList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4000,7 +4168,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
 func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
 func (*PodLogOptions) ProtoMessage() {}
 func (*PodLogOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{141}
+	return fileDescriptor_6c07b07c062484ab, []int{147}
 }
 func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4028,7 +4196,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
 func (m *PodOS) Reset() { *m = PodOS{} }
 func (*PodOS) ProtoMessage() {}
 func (*PodOS) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{142}
+	return fileDescriptor_6c07b07c062484ab, []int{148}
 }
 func (m *PodOS) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4056,7 +4224,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
 func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
 func (*PodPortForwardOptions) ProtoMessage() {}
 func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{143}
+	return fileDescriptor_6c07b07c062484ab, []int{149}
 }
 func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4084,7 +4252,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
 func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
 func (*PodProxyOptions) ProtoMessage() {}
 func (*PodProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{144}
+	return fileDescriptor_6c07b07c062484ab, []int{150}
 }
 func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4112,7 +4280,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
 func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
 func (*PodReadinessGate) ProtoMessage() {}
 func (*PodReadinessGate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{145}
+	return fileDescriptor_6c07b07c062484ab, []int{151}
 }
 func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4140,7 +4308,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
 func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} }
 func (*PodResourceClaim) ProtoMessage() {}
 func (*PodResourceClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{146}
+	return fileDescriptor_6c07b07c062484ab, []int{152}
 }
 func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4168,7 +4336,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
 func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} }
 func (*PodResourceClaimStatus) ProtoMessage() {}
 func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{147}
+	return fileDescriptor_6c07b07c062484ab, []int{153}
 }
 func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4196,7 +4364,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
 func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} }
 func (*PodSchedulingGate) ProtoMessage() {}
 func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{148}
+	return fileDescriptor_6c07b07c062484ab, []int{154}
 }
 func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4224,7 +4392,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
 func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
 func (*PodSecurityContext) ProtoMessage() {}
 func (*PodSecurityContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{149}
+	return fileDescriptor_6c07b07c062484ab, []int{155}
 }
 func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4252,7 +4420,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
 func (m *PodSignature) Reset() { *m = PodSignature{} }
 func (*PodSignature) ProtoMessage() {}
 func (*PodSignature) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{150}
+	return fileDescriptor_6c07b07c062484ab, []int{156}
 }
 func (m *PodSignature) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4280,7 +4448,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
 func (m *PodSpec) Reset() { *m = PodSpec{} }
 func (*PodSpec) ProtoMessage() {}
 func (*PodSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{151}
+	return fileDescriptor_6c07b07c062484ab, []int{157}
 }
 func (m *PodSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4308,7 +4476,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
 func (m *PodStatus) Reset() { *m = PodStatus{} }
 func (*PodStatus) ProtoMessage() {}
 func (*PodStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{152}
+	return fileDescriptor_6c07b07c062484ab, []int{158}
 }
 func (m *PodStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4336,7 +4504,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
 func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
 func (*PodStatusResult) ProtoMessage() {}
 func (*PodStatusResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{153}
+	return fileDescriptor_6c07b07c062484ab, []int{159}
 }
 func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4364,7 +4532,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
 func (m *PodTemplate) Reset() { *m = PodTemplate{} }
 func (*PodTemplate) ProtoMessage() {}
 func (*PodTemplate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{154}
+	return fileDescriptor_6c07b07c062484ab, []int{160}
 }
 func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4392,7 +4560,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
 func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
 func (*PodTemplateList) ProtoMessage() {}
 func (*PodTemplateList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{155}
+	return fileDescriptor_6c07b07c062484ab, []int{161}
 }
 func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4420,7 +4588,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
 func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
 func (*PodTemplateSpec) ProtoMessage() {}
 func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{156}
+	return fileDescriptor_6c07b07c062484ab, []int{162}
 }
 func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4448,7 +4616,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
 func (m *PortStatus) Reset() { *m = PortStatus{} }
 func (*PortStatus) ProtoMessage() {}
 func (*PortStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{157}
+	return fileDescriptor_6c07b07c062484ab, []int{163}
 }
 func (m *PortStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4476,7 +4644,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
 func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
 func (*PortworxVolumeSource) ProtoMessage() {}
 func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{158}
+	return fileDescriptor_6c07b07c062484ab, []int{164}
 }
 func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4504,7 +4672,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
 func (m *Preconditions) Reset() { *m = Preconditions{} }
 func (*Preconditions) ProtoMessage() {}
 func (*Preconditions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{159}
+	return fileDescriptor_6c07b07c062484ab, []int{165}
 }
 func (m *Preconditions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4532,7 +4700,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
 func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
 func (*PreferAvoidPodsEntry) ProtoMessage() {}
 func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{160}
+	return fileDescriptor_6c07b07c062484ab, []int{166}
 }
 func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4560,7 +4728,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
 func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
 func (*PreferredSchedulingTerm) ProtoMessage() {}
 func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{161}
+	return fileDescriptor_6c07b07c062484ab, []int{167}
 }
 func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4588,7 +4756,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
 func (m *Probe) Reset() { *m = Probe{} }
 func (*Probe) ProtoMessage() {}
 func (*Probe) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{162}
+	return fileDescriptor_6c07b07c062484ab, []int{168}
 }
 func (m *Probe) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4616,7 +4784,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
 func (m *ProbeHandler) Reset() { *m = ProbeHandler{} }
 func (*ProbeHandler) ProtoMessage() {}
 func (*ProbeHandler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{163}
+	return fileDescriptor_6c07b07c062484ab, []int{169}
 }
 func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4644,7 +4812,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
 func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
 func (*ProjectedVolumeSource) ProtoMessage() {}
 func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{164}
+	return fileDescriptor_6c07b07c062484ab, []int{170}
 }
 func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4672,7 +4840,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
 func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
 func (*QuobyteVolumeSource) ProtoMessage() {}
 func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{165}
+	return fileDescriptor_6c07b07c062484ab, []int{171}
 }
 func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4700,7 +4868,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
 func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
 func (*RBDPersistentVolumeSource) ProtoMessage() {}
 func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{166}
+	return fileDescriptor_6c07b07c062484ab, []int{172}
 }
 func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4728,7 +4896,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
 func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
 func (*RBDVolumeSource) ProtoMessage() {}
 func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{167}
+	return fileDescriptor_6c07b07c062484ab, []int{173}
 }
 func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4756,7 +4924,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
 func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
 func (*RangeAllocation) ProtoMessage() {}
 func (*RangeAllocation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{168}
+	return fileDescriptor_6c07b07c062484ab, []int{174}
 }
 func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4784,7 +4952,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
 func (m *ReplicationController) Reset() { *m = ReplicationController{} }
 func (*ReplicationController) ProtoMessage() {}
 func (*ReplicationController) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{169}
+	return fileDescriptor_6c07b07c062484ab, []int{175}
 }
 func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4812,7 +4980,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
 func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
 func (*ReplicationControllerCondition) ProtoMessage() {}
 func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{170}
+	return fileDescriptor_6c07b07c062484ab, []int{176}
 }
 func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4840,7 +5008,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
 func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
 func (*ReplicationControllerList) ProtoMessage() {}
 func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{171}
+	return fileDescriptor_6c07b07c062484ab, []int{177}
 }
 func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4868,7 +5036,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
 func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
 func (*ReplicationControllerSpec) ProtoMessage() {}
 func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{172}
+	return fileDescriptor_6c07b07c062484ab, []int{178}
 }
 func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4896,7 +5064,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
 func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
 func (*ReplicationControllerStatus) ProtoMessage() {}
 func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{173}
+	return fileDescriptor_6c07b07c062484ab, []int{179}
 }
 func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4924,7 +5092,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
 func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
 func (*ResourceClaim) ProtoMessage() {}
 func (*ResourceClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{174}
+	return fileDescriptor_6c07b07c062484ab, []int{180}
 }
 func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4952,7 +5120,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
 func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
 func (*ResourceFieldSelector) ProtoMessage() {}
 func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{175}
+	return fileDescriptor_6c07b07c062484ab, []int{181}
 }
 func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4980,7 +5148,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
 func (m *ResourceHealth) Reset() { *m = ResourceHealth{} }
 func (*ResourceHealth) ProtoMessage() {}
 func (*ResourceHealth) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{176}
+	return fileDescriptor_6c07b07c062484ab, []int{182}
 }
 func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5008,7 +5176,7 @@ var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
 func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
 func (*ResourceQuota) ProtoMessage() {}
 func (*ResourceQuota) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{177}
+	return fileDescriptor_6c07b07c062484ab, []int{183}
 }
 func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5036,7 +5204,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
 func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
 func (*ResourceQuotaList) ProtoMessage() {}
 func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{178}
+	return fileDescriptor_6c07b07c062484ab, []int{184}
 }
 func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5064,7 +5232,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
 func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
 func (*ResourceQuotaSpec) ProtoMessage() {}
 func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{179}
+	return fileDescriptor_6c07b07c062484ab, []int{185}
 }
 func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5092,7 +5260,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
 func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
 func (*ResourceQuotaStatus) ProtoMessage() {}
 func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{180}
+	return fileDescriptor_6c07b07c062484ab, []int{186}
 }
 func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5120,7 +5288,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
 func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
 func (*ResourceRequirements) ProtoMessage() {}
 func (*ResourceRequirements) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{181}
+	return fileDescriptor_6c07b07c062484ab, []int{187}
 }
 func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5148,7 +5316,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
 func (m *ResourceStatus) Reset() { *m = ResourceStatus{} }
 func (*ResourceStatus) ProtoMessage() {}
 func (*ResourceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{182}
+	return fileDescriptor_6c07b07c062484ab, []int{188}
 }
 func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5176,7 +5344,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
 func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
 func (*SELinuxOptions) ProtoMessage() {}
 func (*SELinuxOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{183}
+	return fileDescriptor_6c07b07c062484ab, []int{189}
 }
 func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5204,7 +5372,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
 func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
 func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
 func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{184}
+	return fileDescriptor_6c07b07c062484ab, []int{190}
 }
 func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5232,7 +5400,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
 func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
 func (*ScaleIOVolumeSource) ProtoMessage() {}
 func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{185}
+	return fileDescriptor_6c07b07c062484ab, []int{191}
 }
 func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5260,7 +5428,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
 func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
 func (*ScopeSelector) ProtoMessage() {}
 func (*ScopeSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{186}
+	return fileDescriptor_6c07b07c062484ab, []int{192}
 }
 func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5288,7 +5456,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
 func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
 func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
 func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{187}
+	return fileDescriptor_6c07b07c062484ab, []int{193}
 }
 func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5316,7 +5484,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
 func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
 func (*SeccompProfile) ProtoMessage() {}
 func (*SeccompProfile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{188}
+	return fileDescriptor_6c07b07c062484ab, []int{194}
 }
 func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5344,7 +5512,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
 func (m *Secret) Reset() { *m = Secret{} }
 func (*Secret) ProtoMessage() {}
 func (*Secret) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{189}
+	return fileDescriptor_6c07b07c062484ab, []int{195}
 }
 func (m *Secret) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5372,7 +5540,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
 func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
 func (*SecretEnvSource) ProtoMessage() {}
 func (*SecretEnvSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{190}
+	return fileDescriptor_6c07b07c062484ab, []int{196}
 }
 func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5400,7 +5568,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
 func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
 func (*SecretKeySelector) ProtoMessage() {}
 func (*SecretKeySelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{191}
+	return fileDescriptor_6c07b07c062484ab, []int{197}
 }
 func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5428,7 +5596,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
 func (m *SecretList) Reset() { *m = SecretList{} }
 func (*SecretList) ProtoMessage() {}
 func (*SecretList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{192}
+	return fileDescriptor_6c07b07c062484ab, []int{198}
 }
 func (m *SecretList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5456,7 +5624,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
 func (m *SecretProjection) Reset() { *m = SecretProjection{} }
 func (*SecretProjection) ProtoMessage() {}
 func (*SecretProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{193}
+	return fileDescriptor_6c07b07c062484ab, []int{199}
 }
 func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5484,7 +5652,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
 func (m *SecretReference) Reset() { *m = SecretReference{} }
 func (*SecretReference) ProtoMessage() {}
 func (*SecretReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{194}
+	return fileDescriptor_6c07b07c062484ab, []int{200}
 }
 func (m *SecretReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5512,7 +5680,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
 func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
 func (*SecretVolumeSource) ProtoMessage() {}
 func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{195}
+	return fileDescriptor_6c07b07c062484ab, []int{201}
 }
 func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5540,7 +5708,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
 func (m *SecurityContext) Reset() { *m = SecurityContext{} }
 func (*SecurityContext) ProtoMessage() {}
 func (*SecurityContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{196}
+	return fileDescriptor_6c07b07c062484ab, []int{202}
 }
 func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5568,7 +5736,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
 func (m *SerializedReference) Reset() { *m = SerializedReference{} }
 func (*SerializedReference) ProtoMessage() {}
 func (*SerializedReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{197}
+	return fileDescriptor_6c07b07c062484ab, []int{203}
 }
 func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5596,7 +5764,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
 func (m *Service) Reset() { *m = Service{} }
 func (*Service) ProtoMessage() {}
 func (*Service) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{198}
+	return fileDescriptor_6c07b07c062484ab, []int{204}
 }
 func (m *Service) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5624,7 +5792,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
 func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
 func (*ServiceAccount) ProtoMessage() {}
 func (*ServiceAccount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{199}
+	return fileDescriptor_6c07b07c062484ab, []int{205}
 }
 func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5652,7 +5820,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
 func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
 func (*ServiceAccountList) ProtoMessage() {}
 func (*ServiceAccountList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{200}
+	return fileDescriptor_6c07b07c062484ab, []int{206}
 }
 func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5680,7 +5848,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
 func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
 func (*ServiceAccountTokenProjection) ProtoMessage() {}
 func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{201}
+	return fileDescriptor_6c07b07c062484ab, []int{207}
 }
 func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5708,7 +5876,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
 func (m *ServiceList) Reset() { *m = ServiceList{} }
 func (*ServiceList) ProtoMessage() {}
 func (*ServiceList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{202}
+	return fileDescriptor_6c07b07c062484ab, []int{208}
 }
 func (m *ServiceList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5736,7 +5904,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
 func (m *ServicePort) Reset() { *m = ServicePort{} }
 func (*ServicePort) ProtoMessage() {}
 func (*ServicePort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{203}
+	return fileDescriptor_6c07b07c062484ab, []int{209}
 }
 func (m *ServicePort) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5764,7 +5932,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
 func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
 func (*ServiceProxyOptions) ProtoMessage() {}
 func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{204}
+	return fileDescriptor_6c07b07c062484ab, []int{210}
 }
 func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5792,7 +5960,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
 func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
 func (*ServiceSpec) ProtoMessage() {}
 func (*ServiceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{205}
+	return fileDescriptor_6c07b07c062484ab, []int{211}
 }
 func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5820,7 +5988,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
 func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
 func (*ServiceStatus) ProtoMessage() {}
 func (*ServiceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{206}
+	return fileDescriptor_6c07b07c062484ab, []int{212}
 }
 func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5848,7 +6016,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
 func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
 func (*SessionAffinityConfig) ProtoMessage() {}
 func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{207}
+	return fileDescriptor_6c07b07c062484ab, []int{213}
 }
 func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5876,7 +6044,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
 func (m *SleepAction) Reset() { *m = SleepAction{} }
 func (*SleepAction) ProtoMessage() {}
 func (*SleepAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{208}
+	return fileDescriptor_6c07b07c062484ab, []int{214}
 }
 func (m *SleepAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5904,7 +6072,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo
 func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
 func (*StorageOSPersistentVolumeSource)
ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{209} + return fileDescriptor_6c07b07c062484ab, []int{215} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +6100,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{210} + return fileDescriptor_6c07b07c062484ab, []int{216} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +6128,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{211} + return fileDescriptor_6c07b07c062484ab, []int{217} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6156,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{212} + return fileDescriptor_6c07b07c062484ab, []int{218} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6184,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{213} + return fileDescriptor_6c07b07c062484ab, []int{219} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6212,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{214} + return fileDescriptor_6c07b07c062484ab, []int{220} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6072,7 +6240,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{215} + return fileDescriptor_6c07b07c062484ab, []int{221} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6268,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{216} + return fileDescriptor_6c07b07c062484ab, []int{222} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6128,7 +6296,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() 
{} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{217} + return fileDescriptor_6c07b07c062484ab, []int{223} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6156,7 +6324,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{218} + return fileDescriptor_6c07b07c062484ab, []int{224} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6184,7 +6352,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{219} + return fileDescriptor_6c07b07c062484ab, []int{225} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6212,7 +6380,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{220} + return fileDescriptor_6c07b07c062484ab, []int{226} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6240,7 +6408,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{221} + return fileDescriptor_6c07b07c062484ab, []int{227} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6268,7 +6436,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{222} + return fileDescriptor_6c07b07c062484ab, []int{228} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6296,7 +6464,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} } func (*VolumeMountStatus) ProtoMessage() {} func (*VolumeMountStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{223} + return fileDescriptor_6c07b07c062484ab, []int{229} } func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6324,7 +6492,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{224} + return fileDescriptor_6c07b07c062484ab, []int{230} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6352,7 +6520,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - 
return fileDescriptor_6c07b07c062484ab, []int{225} + return fileDescriptor_6c07b07c062484ab, []int{231} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6380,7 +6548,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } func (*VolumeResourceRequirements) ProtoMessage() {} func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{226} + return fileDescriptor_6c07b07c062484ab, []int{232} } func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6408,7 +6576,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{227} + return fileDescriptor_6c07b07c062484ab, []int{233} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6436,7 +6604,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{228} + return fileDescriptor_6c07b07c062484ab, []int{234} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6464,7 +6632,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{229} + return fileDescriptor_6c07b07c062484ab, []int{235} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6492,7 +6660,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{230} + return fileDescriptor_6c07b07c062484ab, []int{236} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6551,9 +6719,12 @@ func init() { proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerExtendedResourceRequest)(nil), "k8s.io.api.core.v1.ContainerExtendedResourceRequest") proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy") + proto.RegisterType((*ContainerRestartRule)(nil), "k8s.io.api.core.v1.ContainerRestartRule") + proto.RegisterType((*ContainerRestartRuleOnExitCodes)(nil), "k8s.io.api.core.v1.ContainerRestartRuleOnExitCodes") proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") 
 	proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning")
 	proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated")
@@ -6583,6 +6754,7 @@ func init() {
 	proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource")
 	proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction")
 	proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource")
+	proto.RegisterType((*FileKeySelector)(nil), "k8s.io.api.core.v1.FileKeySelector")
 	proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource")
 	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry")
 	proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource")
@@ -6671,10 +6843,12 @@ func init() {
 	proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm")
 	proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity")
 	proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions")
+	proto.RegisterType((*PodCertificateProjection)(nil), "k8s.io.api.core.v1.PodCertificateProjection")
 	proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition")
 	proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig")
 	proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption")
 	proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions")
+	proto.RegisterType((*PodExtendedResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodExtendedResourceClaimStatus")
 	proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP")
 	proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList")
 	proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions")
@@ -6787,1020 +6961,1049 @@ func init() {
 }
 
 var fileDescriptor_6c07b07c062484ab = []byte{
-	// 16206 bytes of a gzipped FileDescriptorProto
-	[gzipped FileDescriptorProto bytes elided: machine-generated, not human-readable]
0xdc, 0x5e, 0x36, 0x84, 0xdb, 0xcf, 0x27, 0x84, 0xdb, 0x76, 0xe7, 0xda, 0x0f, 0x46, 0xda, 0x2d, - 0xa2, 0x0c, 0xb0, 0x58, 0x29, 0xf7, 0x29, 0xe9, 0x36, 0xa2, 0x0c, 0x28, 0x42, 0xd8, 0xa4, 0xfb, - 0xbd, 0x24, 0xe1, 0xfe, 0x1f, 0x2d, 0x38, 0x99, 0x39, 0x37, 0x87, 0x20, 0x67, 0x5c, 0x35, 0xe5, - 0x8c, 0x4f, 0xf5, 0xbc, 0x5a, 0x73, 0x04, 0x8f, 0x5f, 0x1c, 0xc8, 0xf9, 0x16, 0x26, 0xfe, 0xb8, - 0x0e, 0x23, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xc5, 0xaf, 0xab, 0x04, 0x10, 0xcf, 0xb1, 0xc7, 0x69, - 0x5c, 0xbc, 0xbf, 0x5b, 0x9a, 0x49, 0x92, 0x88, 0xc1, 0x58, 0xa7, 0x80, 0x3e, 0x05, 0x43, 0xa1, - 0xcc, 0xdd, 0xd9, 0x77, 0xff, 0xb9, 0x3b, 0x19, 0x27, 0xa9, 0xc4, 0x3b, 0x8a, 0x24, 0xfa, 0x7e, - 0x3d, 0x6a, 0x55, 0x07, 0xc1, 0x26, 0xef, 0xe4, 0x7d, 0xc4, 0xae, 0x7a, 0x1e, 0x60, 0x5b, 0xbd, - 0xa3, 0x92, 0xa2, 0x1b, 0xed, 0x85, 0xa5, 0x61, 0xa1, 0x37, 0x60, 0x32, 0xe4, 0x01, 0x5b, 0x63, - 0x13, 0x19, 0xbe, 0x16, 0x59, 0xcc, 0xbb, 0x6a, 0x02, 0x86, 0x53, 0xd8, 0x68, 0x59, 0xb6, 0xca, - 0x8c, 0xa1, 0xf8, 0xf2, 0x3c, 0x17, 0xb7, 0x28, 0x0c, 0xa2, 0x8e, 0x26, 0x27, 0x81, 0x0d, 0xbf, - 0x56, 0x13, 0x7d, 0x0a, 0x80, 0x2e, 0x22, 0x21, 0xc2, 0x19, 0xcc, 0x3f, 0x42, 0xe9, 0xd9, 0x52, - 0xcf, 0xf4, 0xc0, 0x60, 0xe1, 0x01, 0x16, 0x15, 0x11, 0xac, 0x11, 0x44, 0x0e, 0x8c, 0xc5, 0xff, - 0xe2, 0xac, 0xf4, 0xe7, 0x73, 0x5b, 0x48, 0x12, 0x67, 0xea, 0x8d, 0x45, 0x9d, 0x04, 0x36, 0x29, - 0xa2, 0x4f, 0xc2, 0xc9, 0xed, 0x5c, 0xbb, 0x23, 0xce, 0x4b, 0xb2, 0x34, 0xf3, 0xf9, 0xd6, 0x46, - 0xf9, 0xf5, 0xed, 0xff, 0x09, 0xe0, 0x91, 0x0e, 0x27, 0x3d, 0x9a, 0x33, 0x6d, 0x06, 0x9e, 0x49, - 0xca, 0x55, 0x66, 0x32, 0x2b, 0x1b, 0x82, 0x96, 0xc4, 0x86, 0x2a, 0xbc, 0xe7, 0x0d, 0xf5, 0x13, - 0x96, 0xf6, 0xcc, 0xe2, 0x16, 0xe5, 0x1f, 0x3b, 0xe0, 0x0d, 0xf6, 0x00, 0x45, 0x60, 0xeb, 0x19, - 0x72, 0xa4, 0xe7, 0x7b, 0xee, 0x4e, 0xef, 0x82, 0xa5, 0xdf, 0xc9, 0x0e, 0x71, 0xcf, 0x45, 0x4c, - 0x97, 0x0f, 0xfa, 0xfd, 0x87, 0x15, 0xee, 0xfe, 0x1b, 0x16, 0x9c, 0x4c, 0x15, 0xf3, 0x3e, 0x90, - 0x50, 0x44, 0xe9, 0x5b, 0x7d, 0xcf, 0x9d, 0x97, 0x04, 0xf9, 0x37, 0x5c, 0x11, 0xdf, 0x70, 0x32, - 0x17, 0x2f, 0xd9, 0xf5, 0x2f, 0xfd, 0x7d, 0xe9, 0x08, 0x6b, 0xc0, 0x44, 0xc4, 0xf9, 0x5d, 0x47, - 0x2d, 0x38, 0x5b, 0x6b, 0x07, 0x41, 0xbc, 0x58, 0x33, 0x36, 0x27, 0x7f, 0x2d, 0x3e, 0xbe, 0xb7, - 0x5b, 0x3a, 0xbb, 0xd0, 0x05, 0x17, 0x77, 0xa5, 0x86, 0x3c, 0x40, 0xcd, 0x94, 0x75, 0x1f, 0x3b, - 0x00, 0x72, 0xa4, 0x40, 0x69, 0x5b, 0x40, 0x6e, 0xa7, 0x9b, 0x61, 0x23, 0x98, 0x41, 0xf9, 0x70, - 0x65, 0x37, 0xdf, 0x99, 0x78, 0xfa, 0x33, 0xd7, 0xe0, 0x4c, 0xe7, 0xc5, 0x74, 0xa0, 0x10, 0x14, - 0x7f, 0x63, 0xc1, 0xe9, 0x8e, 0x71, 0xce, 0xbe, 0x0b, 0x1f, 0x0b, 0xf6, 0xe7, 0x2d, 0x78, 0x34, - 0xb3, 0x46, 0xd2, 0x79, 0xb0, 0x46, 0x0b, 0x35, 0x63, 0xd8, 0x38, 0xe2, 0x8f, 0x04, 0xe0, 0x18, - 0xc7, 0xb0, 0x17, 0x2d, 0x74, 0xb5, 0x17, 0xfd, 0x53, 0x0b, 0x52, 0x57, 0xfd, 0x21, 0x70, 0x9e, - 0x65, 0x93, 0xf3, 0x7c, 0xbc, 0x97, 0xd1, 0xcc, 0x61, 0x3a, 0xff, 0x79, 0x02, 0x8e, 0xe7, 0x78, - 0x90, 0x6f, 0xc3, 0xd4, 0x46, 0x8d, 0x98, 0x21, 0x43, 0x3a, 0x85, 0xd2, 0xeb, 0x18, 0x5f, 0x64, - 0xfe, 0xd8, 0xde, 0x6e, 0x69, 0x2a, 0x85, 0x82, 0xd3, 0x4d, 0xa0, 0xcf, 0x5b, 0x70, 0xd4, 0xb9, - 0x13, 0x2e, 0xd1, 0x17, 0x84, 0x5b, 0x9b, 0x6f, 0xf8, 0xb5, 0x2d, 0xca, 0x98, 0xc9, 0x6d, 0xf5, - 0x62, 0xa6, 0x28, 0xfc, 0x56, 0x35, 0x85, 0x6f, 0x34, 0x3f, 0xbd, 0xb7, 0x5b, 0x3a, 0x9a, 0x85, - 0x85, 0x33, 0xdb, 0x42, 0x58, 0xe4, 0x38, 0x73, 0xa2, 0xcd, 0x4e, 0x41, 0x6d, 0xb2, 0x5c, 0xfd, - 0x39, 0x4b, 0x2c, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x81, 0xe1, 0x0d, 0x19, 0xbf, 0x22, 0x83, 0xe5, - 0x8e, 0x07, 0xb2, 0x73, 
0x54, 0x0f, 0x6e, 0x80, 0xa3, 0x90, 0x70, 0x4c, 0x14, 0xbd, 0x0e, 0x45, - 0x6f, 0x3d, 0x14, 0xa1, 0xf5, 0xb2, 0xed, 0x80, 0x4d, 0x4b, 0x6b, 0x1e, 0x3a, 0x6a, 0x75, 0xb9, - 0x8a, 0x69, 0x45, 0x74, 0x05, 0x8a, 0xc1, 0xed, 0xba, 0xd0, 0xe3, 0x64, 0x6e, 0x52, 0x3c, 0xbf, - 0x98, 0xd3, 0x2b, 0x46, 0x09, 0xcf, 0x2f, 0x62, 0x4a, 0x02, 0x55, 0xa0, 0x9f, 0xb9, 0x5d, 0x0b, - 0xd6, 0x36, 0xf3, 0x29, 0xdf, 0x21, 0x7c, 0x01, 0xf7, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x6b, - 0x30, 0x50, 0x73, 0xbd, 0x3a, 0x09, 0x04, 0x2f, 0xfb, 0xe1, 0x4c, 0x8d, 0x0d, 0xc3, 0xc8, 0xa1, - 0xc9, 0x15, 0x18, 0x0c, 0x03, 0x0b, 0x5a, 0x8c, 0x2a, 0x69, 0x6d, 0xae, 0xcb, 0x1b, 0x2b, 0x9b, - 0x2a, 0x69, 0x6d, 0x2e, 0x57, 0x3b, 0x52, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xbd, - 0x26, 0x5c, 0xaa, 0x33, 0xc5, 0x9b, 0x66, 0xf4, 0xaf, 0xf9, 0x81, 0xbd, 0xdd, 0x52, 0x61, 0x79, - 0x01, 0x17, 0xd6, 0x6b, 0x68, 0x15, 0x06, 0xd7, 0x79, 0xbc, 0x20, 0x21, 0x1f, 0x7d, 0x32, 0x3b, - 0x94, 0x51, 0x2a, 0xa4, 0x10, 0xf7, 0x6d, 0x15, 0x00, 0x2c, 0x89, 0xb0, 0x94, 0x5b, 0x2a, 0xee, - 0x91, 0x08, 0xbb, 0x3a, 0x7b, 0xb0, 0x58, 0x55, 0xfc, 0xa9, 0x11, 0x47, 0x4f, 0xc2, 0x1a, 0x45, - 0xba, 0xaa, 0x9d, 0x7b, 0xed, 0x80, 0xe5, 0xe4, 0x10, 0x8a, 0x99, 0xcc, 0x55, 0x3d, 0x27, 0x91, - 0x3a, 0xad, 0x6a, 0x85, 0x84, 0x63, 0xa2, 0x68, 0x0b, 0xc6, 0xb6, 0xc3, 0xd6, 0x26, 0x91, 0x5b, - 0x9a, 0x85, 0xeb, 0xcb, 0xe1, 0x66, 0x6f, 0x0a, 0x44, 0x37, 0x88, 0xda, 0x4e, 0x23, 0x75, 0x0a, - 0xb1, 0x67, 0xcd, 0x4d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xb7, 0xed, 0xdf, 0xde, 0x89, - 0x88, 0x88, 0x96, 0x9a, 0x39, 0xfc, 0x6f, 0x71, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, - 0x4d, 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xcc, 0x0f, 0xc5, 0x3e, 0x27, 0x91, 0x72, 0x06, 0x85, 0x9d, - 0x96, 0x31, 0x29, 0x76, 0x4a, 0xb6, 0x36, 0xfd, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x54, 0xfe, 0x29, - 0x59, 0xc9, 0xc0, 0x4f, 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x54, 0x87, 0xf1, 0x96, 0x1f, - 0x44, 0x77, 0xfc, 0x40, 0xae, 0x2f, 0xd4, 0x41, 0x50, 0x6a, 0x60, 0x8a, 0x16, 0x99, 0x59, 0x90, - 0x09, 0xc1, 0x09, 0x9a, 0xe8, 0xe3, 0x30, 0x18, 0xd6, 0x9c, 0x06, 0x29, 0x5f, 0x9f, 0x3e, 0x92, - 0x7f, 0xfd, 0x54, 0x39, 0x4a, 0xce, 0xea, 0xe2, 0xe1, 0x9e, 0x38, 0x0a, 0x96, 0xe4, 0xd0, 0x32, - 0xf4, 0xb3, 0x54, 0xd6, 0x2c, 0xb4, 0x6f, 0x4e, 0x44, 0xf9, 0x94, 0x53, 0x0f, 0x3f, 0x9b, 0x58, - 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x92, 0x02, 0x3f, 0x9c, 0x3e, 0x96, 0xbf, 0x07, 0x84, 0x80, - 0xe1, 0x7a, 0xb5, 0xd3, 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x1e, 0xef, - 0x60, 0xb0, 0x99, 0x7b, 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0xc7, 0x43, 0x69, - 0x9e, 0x85, 0x49, 0x98, 0xfe, 0x63, 0x2b, 0x65, 0xb1, 0xf1, 0x91, 0x5e, 0x05, 0xde, 0x0f, 0xf0, - 0xe1, 0xfa, 0x79, 0x0b, 0x8e, 0xb7, 0x32, 0x3f, 0x44, 0x30, 0x00, 0xbd, 0xc9, 0xcd, 0xf9, 0xa7, - 0xab, 0x30, 0xd0, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0x52, 0x38, 0x50, 0x7c, 0xcf, 0xc2, 0x81, 0x15, - 0x18, 0xaa, 0xf1, 0x97, 0x9c, 0x4c, 0x5f, 0xd0, 0x53, 0x10, 0x53, 0xae, 0xa7, 0x15, 0x15, 0xb1, - 0x22, 0x81, 0x7e, 0xd2, 0x82, 0xd3, 0xc9, 0xae, 0x63, 0xc2, 0xc0, 0xc2, 0x5c, 0x93, 0x8b, 0xb5, - 0x96, 0xc5, 0xf7, 0xa7, 0xf8, 0x7f, 0x03, 0x79, 0xbf, 0x1b, 0x02, 0xee, 0xdc, 0x18, 0x5a, 0xcc, - 0x90, 0xab, 0x0d, 0x98, 0x3a, 0xc9, 0x1e, 0x64, 0x6b, 0x2f, 0xc2, 0x68, 0xd3, 0x6f, 0x7b, 0x91, - 0xb0, 0xba, 0x14, 0xa6, 0x5b, 0xcc, 0x64, 0x69, 0x45, 0x2b, 0xc7, 0x06, 0x56, 0x42, 0x22, 0x37, - 0x74, 0xdf, 0x12, 0xb9, 0x77, 0x60, 0xd4, 0xd3, 0x1c, 0x12, 0x3a, 0xbd, 0x60, 0x85, 0x74, 0x51, - 0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 
0xb3, 0xb4, 0x0c, 0xde, 0x9b, 0xb4, 0xec, 0x50, - 0x9f, 0xc4, 0xf6, 0x6f, 0x16, 0x32, 0x5e, 0x0c, 0x5c, 0x2a, 0xf7, 0x9a, 0x29, 0x95, 0x3b, 0x97, - 0x94, 0xca, 0xa5, 0x54, 0x55, 0x86, 0x40, 0xae, 0xf7, 0x1c, 0x9a, 0x3d, 0x07, 0xa6, 0xfe, 0x61, - 0x0b, 0x4e, 0x30, 0xdd, 0x07, 0x6d, 0xe0, 0x3d, 0xeb, 0x3b, 0x98, 0x41, 0xec, 0xb5, 0x6c, 0x72, - 0x38, 0xaf, 0x1d, 0xbb, 0x01, 0x67, 0xbb, 0xdd, 0xbb, 0xcc, 0xbe, 0xb8, 0xae, 0xcc, 0x2b, 0x62, - 0xfb, 0xe2, 0x7a, 0x79, 0x11, 0x33, 0x48, 0xaf, 0x61, 0x17, 0xed, 0xff, 0xdb, 0x82, 0x62, 0xc5, - 0xaf, 0x1f, 0xc2, 0x8b, 0xfe, 0x63, 0xc6, 0x8b, 0xfe, 0x91, 0xec, 0x1b, 0xbf, 0x9e, 0xab, 0xec, - 0x5b, 0x4a, 0x28, 0xfb, 0x4e, 0xe7, 0x11, 0xe8, 0xac, 0xda, 0xfb, 0xe5, 0x22, 0x8c, 0x54, 0xfc, - 0xba, 0xda, 0x67, 0xff, 0xfd, 0xfd, 0xb8, 0x11, 0xe5, 0x66, 0xcd, 0xd2, 0x28, 0x33, 0x7b, 0x62, - 0x19, 0xf5, 0xe2, 0xbb, 0xcc, 0x9b, 0xe8, 0x16, 0x71, 0x37, 0x36, 0x23, 0x52, 0x4f, 0x7e, 0xce, - 0xe1, 0x79, 0x13, 0x7d, 0xab, 0x08, 0x13, 0x89, 0xd6, 0x51, 0x03, 0xc6, 0x1a, 0xba, 0x2a, 0x49, - 0xac, 0xd3, 0xfb, 0xd2, 0x42, 0x09, 0x6f, 0x0c, 0xad, 0x08, 0x9b, 0xc4, 0xd1, 0x2c, 0x80, 0xa7, - 0xdb, 0xa4, 0xab, 0x00, 0xcb, 0x9a, 0x3d, 0xba, 0x86, 0x81, 0x5e, 0x82, 0x91, 0xc8, 0x6f, 0xf9, - 0x0d, 0x7f, 0x63, 0xe7, 0x2a, 0x91, 0x11, 0x39, 0x95, 0xc9, 0xf2, 0x5a, 0x0c, 0xc2, 0x3a, 0x1e, - 0xba, 0x0b, 0x53, 0x8a, 0x48, 0xf5, 0x01, 0xa8, 0xd7, 0x98, 0xd8, 0x64, 0x35, 0x49, 0x11, 0xa7, - 0x1b, 0x41, 0xaf, 0xc0, 0x38, 0xb3, 0x9d, 0x66, 0xf5, 0xaf, 0x92, 0x1d, 0x19, 0xa9, 0x99, 0x71, - 0xd8, 0x2b, 0x06, 0x04, 0x27, 0x30, 0xd1, 0x02, 0x4c, 0x35, 0xdd, 0x30, 0x51, 0x7d, 0x80, 0x55, - 0x67, 0x1d, 0x58, 0x49, 0x02, 0x71, 0x1a, 0xdf, 0xfe, 0x75, 0x31, 0xc7, 0x5e, 0xe4, 0x7e, 0xb0, - 0x1d, 0xdf, 0xdf, 0xdb, 0xf1, 0x9b, 0x16, 0x4c, 0xd2, 0xd6, 0x99, 0x41, 0xa8, 0x64, 0xa4, 0x54, - 0x2e, 0x0f, 0xab, 0x43, 0x2e, 0x8f, 0x73, 0xf4, 0xd8, 0xae, 0xfb, 0xed, 0x48, 0x48, 0x47, 0xb5, - 0x73, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0x84, 0xd7, 0xbd, 0x8e, 0x47, 0x82, 0x00, - 0x0b, 0xa8, 0x4c, 0xf5, 0xd1, 0x97, 0x9d, 0xea, 0x83, 0x47, 0x6c, 0x17, 0x76, 0x74, 0x82, 0xa5, - 0xd5, 0x22, 0xb6, 0x4b, 0x03, 0xbb, 0x18, 0xc7, 0xfe, 0x76, 0x11, 0x46, 0x2b, 0x7e, 0x3d, 0x36, - 0xec, 0x78, 0xd1, 0x30, 0xec, 0x38, 0x9b, 0x30, 0xec, 0x98, 0xd4, 0x71, 0x35, 0x33, 0x8e, 0x37, - 0x01, 0xf9, 0x22, 0x90, 0xfc, 0x65, 0xe2, 0x31, 0xbb, 0x37, 0x61, 0xa8, 0x57, 0x8c, 0xcd, 0x1e, - 0xae, 0xa7, 0x30, 0x70, 0x46, 0xad, 0x0f, 0x4c, 0x42, 0x0e, 0xd7, 0x24, 0xe4, 0x4f, 0x2c, 0xb6, - 0x02, 0x16, 0x57, 0xab, 0xdc, 0x56, 0x19, 0x5d, 0x84, 0x11, 0x76, 0x5a, 0xb2, 0x90, 0x11, 0xd2, - 0x72, 0x82, 0xa5, 0xf1, 0x5c, 0x8d, 0x8b, 0xb1, 0x8e, 0x83, 0xce, 0xc3, 0x50, 0x48, 0x9c, 0xa0, - 0xb6, 0xa9, 0xae, 0x0a, 0x61, 0xe6, 0xc0, 0xcb, 0xb0, 0x82, 0xa2, 0xb7, 0xe2, 0xc0, 0xe3, 0xc5, - 0x7c, 0xc3, 0x67, 0xbd, 0x3f, 0x7c, 0xbb, 0xe5, 0x47, 0x1b, 0xb7, 0x6f, 0x01, 0x4a, 0xe3, 0xf7, - 0xe0, 0x49, 0x56, 0x32, 0x43, 0xe3, 0x0e, 0xa7, 0xc2, 0xe2, 0xfe, 0x9b, 0x05, 0xe3, 0x15, 0xbf, - 0x4e, 0x8f, 0x81, 0xef, 0xa5, 0x3d, 0xaf, 0x67, 0x5d, 0x18, 0xe8, 0x90, 0x75, 0xe1, 0x31, 0xe8, - 0xaf, 0xf8, 0xf5, 0x2e, 0xe1, 0x7b, 0x7f, 0xc5, 0x82, 0xc1, 0x8a, 0x5f, 0x3f, 0x04, 0x25, 0xce, - 0x6b, 0xa6, 0x12, 0xe7, 0x44, 0xce, 0xba, 0xc9, 0xd1, 0xdb, 0xfc, 0x79, 0x1f, 0x8c, 0xd1, 0x7e, - 0xfa, 0x1b, 0x72, 0x2a, 0x8d, 0x61, 0xb3, 0x7a, 0x18, 0x36, 0xfa, 0xa4, 0xf0, 0x1b, 0x0d, 0xff, - 0x4e, 0x72, 0x5a, 0x97, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x2c, 0x0c, 0xb5, 0x02, 0xb2, 0xed, 0xfa, - 0x82, 0x57, 0xd7, 0x54, 0x62, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x7d, 0xc4, 
0x87, 0xae, 0x47, 0xf9, - 0x92, 0x9a, 0xef, 0xd5, 0xb9, 0x9e, 0xa3, 0x28, 0x52, 0x83, 0x69, 0xe5, 0xd8, 0xc0, 0x42, 0xb7, - 0x60, 0x98, 0xfd, 0x67, 0xc7, 0x4e, 0xff, 0x81, 0x8f, 0x1d, 0x91, 0x2c, 0x59, 0x10, 0xc0, 0x31, - 0x2d, 0xf4, 0x3c, 0x40, 0x24, 0xd3, 0xeb, 0x84, 0x22, 0x8c, 0xab, 0x7a, 0xd7, 0xa8, 0xc4, 0x3b, - 0x21, 0xd6, 0xb0, 0xd0, 0x33, 0x30, 0x1c, 0x39, 0x6e, 0xe3, 0x9a, 0xeb, 0x31, 0x5b, 0x00, 0xda, - 0x7f, 0x91, 0xb3, 0x58, 0x14, 0xe2, 0x18, 0x4e, 0xf9, 0x4a, 0x16, 0xdd, 0x6a, 0x7e, 0x27, 0x12, - 0xe9, 0xf9, 0x8a, 0x9c, 0xaf, 0xbc, 0xa6, 0x4a, 0xb1, 0x86, 0x81, 0x36, 0xe1, 0x94, 0xeb, 0xb1, - 0x34, 0x5a, 0xa4, 0xba, 0xe5, 0xb6, 0xd6, 0xae, 0x55, 0x6f, 0x92, 0xc0, 0x5d, 0xdf, 0x99, 0x77, - 0x6a, 0x5b, 0xc4, 0xab, 0x33, 0xb1, 0xc3, 0xd0, 0xfc, 0xe3, 0xa2, 0x8b, 0xa7, 0xca, 0x1d, 0x70, - 0x71, 0x47, 0x4a, 0xc8, 0xa6, 0xdb, 0x31, 0x20, 0x4e, 0x53, 0xc8, 0x17, 0x78, 0x0a, 0x1e, 0x56, - 0x82, 0x05, 0xc4, 0x7e, 0x81, 0xed, 0x89, 0xeb, 0x55, 0xf4, 0xb4, 0x71, 0xbc, 0x1c, 0xd7, 0x8f, - 0x97, 0xfd, 0xdd, 0xd2, 0xc0, 0xf5, 0xaa, 0x16, 0xe9, 0xe8, 0x12, 0x1c, 0xab, 0xf8, 0xf5, 0x8a, - 0x1f, 0x44, 0xcb, 0x7e, 0x70, 0xc7, 0x09, 0xea, 0x72, 0x09, 0x96, 0x64, 0xac, 0x27, 0x7a, 0xc6, - 0xf6, 0xf3, 0x13, 0xc8, 0x88, 0xe3, 0xf4, 0x02, 0xe3, 0x10, 0x0f, 0xe8, 0x5a, 0x5b, 0x63, 0xbc, - 0x8a, 0x4a, 0x56, 0x77, 0xd9, 0x89, 0x08, 0xba, 0x0e, 0x63, 0x35, 0xfd, 0xda, 0x16, 0xd5, 0x9f, - 0x92, 0x97, 0x9d, 0x71, 0xa7, 0x67, 0xde, 0xf3, 0x66, 0x7d, 0xfb, 0x1b, 0x96, 0x68, 0x85, 0x4b, - 0x3e, 0xb8, 0x0d, 0x6d, 0xf7, 0x33, 0x77, 0x01, 0xa6, 0x02, 0xbd, 0x8a, 0x66, 0x8b, 0x76, 0x8c, - 0x67, 0xff, 0x49, 0x00, 0x71, 0x1a, 0x1f, 0x7d, 0x12, 0x4e, 0x1a, 0x85, 0x52, 0x2d, 0xaf, 0xe5, - 0xe0, 0x66, 0xb2, 0x21, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x08, 0xc7, 0x93, 0xdf, 0x25, - 0xa4, 0x35, 0xf7, 0xf9, 0x75, 0x85, 0x83, 0x7d, 0x9d, 0xfd, 0x12, 0x4c, 0xd1, 0x67, 0xbc, 0x62, - 0x49, 0xd9, 0xfc, 0x75, 0x0f, 0xa7, 0xf5, 0xdb, 0x43, 0xec, 0x1a, 0x4c, 0x64, 0xa0, 0x43, 0x9f, - 0x86, 0xf1, 0x90, 0xb0, 0x18, 0x72, 0x52, 0x4a, 0xd8, 0xc1, 0x2f, 0xbe, 0xba, 0xa4, 0x63, 0xf2, - 0x97, 0x90, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x13, 0xc6, 0xef, 0xb8, 0x5e, 0xdd, 0xbf, 0x13, 0x4a, - 0xfa, 0x43, 0xf9, 0x2a, 0x87, 0x5b, 0x1c, 0x33, 0xd1, 0x47, 0xa3, 0xb9, 0x5b, 0x06, 0x31, 0x9c, - 0x20, 0x4e, 0x8f, 0x9a, 0xa0, 0xed, 0xcd, 0x85, 0x37, 0x42, 0x12, 0x88, 0x08, 0x77, 0xec, 0xa8, - 0xc1, 0xb2, 0x10, 0xc7, 0x70, 0x7a, 0xd4, 0xb0, 0x3f, 0xcc, 0xb1, 0x9e, 0x9d, 0x65, 0xe2, 0xa8, - 0xc1, 0xaa, 0x14, 0x6b, 0x18, 0xf4, 0x28, 0x66, 0xff, 0x56, 0x7d, 0x0f, 0xfb, 0x7e, 0x24, 0x0f, - 0x6f, 0x96, 0xae, 0x53, 0x2b, 0xc7, 0x06, 0x56, 0x4e, 0x3c, 0xbd, 0xbe, 0x83, 0xc6, 0xd3, 0x43, - 0x51, 0x87, 0x58, 0x02, 0x3c, 0x22, 0xf4, 0xa5, 0x4e, 0xb1, 0x04, 0xf6, 0xef, 0x2b, 0xce, 0x00, - 0xe5, 0x05, 0xd6, 0xc5, 0x00, 0xf5, 0xf3, 0x80, 0x81, 0x4c, 0x29, 0x5a, 0xe5, 0xa3, 0x23, 0x61, - 0x68, 0x09, 0x06, 0xc3, 0x9d, 0xb0, 0x16, 0x35, 0xc2, 0x4e, 0x29, 0x59, 0xab, 0x0c, 0x45, 0xcb, - 0x08, 0xce, 0xab, 0x60, 0x59, 0x17, 0xd5, 0xe0, 0x88, 0xa0, 0xb8, 0xb0, 0xe9, 0x78, 0x2a, 0x51, - 0x24, 0xb7, 0x7e, 0xbc, 0xb8, 0xb7, 0x5b, 0x3a, 0x22, 0x5a, 0xd6, 0xc1, 0xfb, 0xbb, 0x25, 0xba, - 0x25, 0x33, 0x20, 0x38, 0x8b, 0x1a, 0x5f, 0xf2, 0xb5, 0x9a, 0xdf, 0x6c, 0x55, 0x02, 0x7f, 0xdd, - 0x6d, 0x90, 0x4e, 0x8a, 0xe5, 0xaa, 0x81, 0x29, 0x96, 0xbc, 0x51, 0x86, 0x13, 0xd4, 0xd0, 0x6d, - 0x98, 0x70, 0x5a, 0xad, 0xb9, 0xa0, 0xe9, 0x07, 0xb2, 0x81, 0x91, 0x7c, 0x0d, 0xc5, 0x9c, 0x89, - 0xca, 0xf3, 0x44, 0x26, 0x0a, 0x71, 0x92, 0x20, 0x1d, 0x28, 0xb1, 0xd1, 0x8c, 0x81, 0x1a, 0x8b, - 
0x07, 0x4a, 0xec, 0xcb, 0x8c, 0x81, 0xca, 0x80, 0xe0, 0x2c, 0x6a, 0xf6, 0x0f, 0x30, 0xc6, 0x9f, - 0xc5, 0x9b, 0x66, 0x6e, 0x46, 0x4d, 0x18, 0x6b, 0xb1, 0x63, 0x5f, 0xe4, 0x70, 0x13, 0x47, 0xc5, - 0x8b, 0x3d, 0x0a, 0x42, 0xef, 0xb0, 0x2c, 0xb4, 0x86, 0x41, 0x6c, 0x45, 0x27, 0x87, 0x4d, 0xea, - 0xf6, 0x2f, 0xcd, 0x30, 0xd6, 0xb1, 0xca, 0xa5, 0x9b, 0x83, 0xc2, 0xe9, 0x52, 0xc8, 0x33, 0x66, - 0xf2, 0xf5, 0x08, 0xf1, 0xfa, 0x12, 0x8e, 0x9b, 0x58, 0xd6, 0x45, 0x9f, 0x82, 0x71, 0xd7, 0x73, - 0xe3, 0xec, 0xcd, 0xe1, 0xf4, 0xd1, 0xfc, 0x68, 0x5e, 0x0a, 0x4b, 0xcf, 0xef, 0xa8, 0x57, 0xc6, - 0x09, 0x62, 0xe8, 0x2d, 0x66, 0x23, 0x2a, 0x49, 0x17, 0x7a, 0x21, 0xad, 0x9b, 0x83, 0x4a, 0xb2, - 0x1a, 0x11, 0xd4, 0x86, 0x23, 0xe9, 0x2c, 0xd6, 0xe1, 0xb4, 0x9d, 0xff, 0x36, 0x4a, 0x27, 0xa2, - 0x8e, 0x13, 0xf1, 0xa5, 0x61, 0x21, 0xce, 0xa2, 0x8f, 0xae, 0x25, 0x73, 0x0c, 0x17, 0x0d, 0x0d, - 0x44, 0x2a, 0xcf, 0xf0, 0x58, 0xc7, 0xf4, 0xc2, 0x1b, 0x70, 0x5a, 0x4b, 0xd3, 0x7a, 0x39, 0x70, - 0x98, 0x8d, 0x92, 0xcb, 0x6e, 0x23, 0x8d, 0xa9, 0x7d, 0x74, 0x6f, 0xb7, 0x74, 0x7a, 0xad, 0x13, - 0x22, 0xee, 0x4c, 0x07, 0x5d, 0x87, 0x63, 0x3c, 0x16, 0xcd, 0x22, 0x71, 0xea, 0x0d, 0xd7, 0x53, - 0x5c, 0x33, 0x3f, 0xbb, 0x4e, 0xee, 0xed, 0x96, 0x8e, 0xcd, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0xe8, - 0x35, 0x18, 0xae, 0x7b, 0xf2, 0x94, 0x1d, 0x30, 0x32, 0xe1, 0x0e, 0x2f, 0xae, 0x56, 0xd5, 0xf7, - 0xc7, 0x7f, 0x70, 0x5c, 0x01, 0x6d, 0x70, 0x15, 0x98, 0x92, 0x5b, 0x0e, 0xa6, 0x42, 0x94, 0x26, - 0x45, 0xfb, 0x46, 0x70, 0x07, 0xae, 0xfb, 0x55, 0x0e, 0x80, 0x46, 0xdc, 0x07, 0x83, 0x30, 0x7a, - 0x13, 0x90, 0xc8, 0xb8, 0x34, 0x57, 0x63, 0x09, 0x02, 0x35, 0xbb, 0x54, 0x25, 0x42, 0xa8, 0xa6, - 0x30, 0x70, 0x46, 0x2d, 0x74, 0x85, 0x1e, 0x8f, 0x7a, 0xa9, 0x38, 0x7e, 0x55, 0xbe, 0xf5, 0x45, - 0xd2, 0x0a, 0x08, 0x33, 0xa5, 0x34, 0x29, 0xe2, 0x44, 0x3d, 0x54, 0x87, 0x53, 0x4e, 0x3b, 0xf2, - 0x99, 0x76, 0xd1, 0x44, 0x5d, 0xf3, 0xb7, 0x88, 0xc7, 0x14, 0xfb, 0x43, 0x2c, 0xf4, 0xe9, 0xa9, - 0xb9, 0x0e, 0x78, 0xb8, 0x23, 0x15, 0xfa, 0x9c, 0xa2, 0x63, 0xa1, 0x29, 0xfe, 0x0c, 0x3f, 0x75, - 0xae, 0x0d, 0x97, 0x18, 0xe8, 0x25, 0x18, 0xd9, 0xf4, 0xc3, 0x68, 0x95, 0x44, 0x77, 0xfc, 0x60, - 0x4b, 0xa4, 0x78, 0x88, 0xd3, 0xea, 0xc4, 0x20, 0xac, 0xe3, 0xa1, 0xa7, 0x60, 0x90, 0x99, 0x9d, - 0x95, 0x17, 0xd9, 0x5d, 0x3b, 0x14, 0x9f, 0x31, 0x57, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xae, - 0x2c, 0xb0, 0xe3, 0x38, 0x81, 0x5a, 0xae, 0x2c, 0x60, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x74, 0x02, - 0x52, 0x09, 0xfc, 0x1a, 0x09, 0xb5, 0x64, 0x4e, 0x8f, 0xf0, 0x04, 0x16, 0x74, 0xb9, 0x56, 0xb3, - 0x10, 0x70, 0x76, 0x3d, 0x44, 0xd2, 0x29, 0x8a, 0xc7, 0xf3, 0xd5, 0xae, 0x69, 0x76, 0xb0, 0xc7, - 0x2c, 0xc5, 0x1e, 0x4c, 0xaa, 0xe4, 0xc8, 0x3c, 0x65, 0x45, 0x38, 0x3d, 0xc1, 0xd6, 0x76, 0xef, - 0xf9, 0x2e, 0x94, 0x22, 0xbb, 0x9c, 0xa0, 0x84, 0x53, 0xb4, 0x8d, 0xd8, 0xba, 0x93, 0x5d, 0x63, - 0xeb, 0x5e, 0x80, 0xe1, 0xb0, 0x7d, 0xbb, 0xee, 0x37, 0x1d, 0xd7, 0x63, 0xd6, 0x3b, 0xda, 0xc3, - 0xbd, 0x2a, 0x01, 0x38, 0xc6, 0x41, 0xcb, 0x30, 0xe4, 0x48, 0x2d, 0x35, 0xca, 0x0f, 0x1b, 0xa8, - 0x74, 0xd3, 0x3c, 0x92, 0x96, 0xd4, 0x4b, 0xab, 0xba, 0xe8, 0x55, 0x18, 0x13, 0xa1, 0x49, 0x78, - 0x14, 0x1e, 0x66, 0x5d, 0xa3, 0x39, 0x53, 0x57, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x0d, 0x18, 0x89, - 0xfc, 0x86, 0x90, 0x71, 0x86, 0xd3, 0xc7, 0xf3, 0xa3, 0xfb, 0xae, 0x29, 0x34, 0x5d, 0x7f, 0xa2, - 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe3, 0xeb, 0x9d, 0xa5, 0x6e, 0x22, 0xa1, 0x48, 0x48, 0x7f, 0x3a, - 0xcf, 0xf4, 0x92, 0xa1, 0x99, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x19, 0xa6, 0x5a, 0x81, - 0xeb, 0xb3, 0x35, 0xa1, 
0xb4, 0xee, 0xd3, 0x66, 0xa2, 0xd6, 0x4a, 0x12, 0x01, 0xa7, 0xeb, 0xb0, - 0xc8, 0x32, 0xa2, 0x70, 0xfa, 0x24, 0x4f, 0x36, 0xc7, 0xe5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, - 0x61, 0x27, 0x31, 0x17, 0xe1, 0x4d, 0xcf, 0xe4, 0xc7, 0x2b, 0xd0, 0x45, 0x7d, 0x9c, 0xf7, 0x57, - 0x7f, 0x71, 0x4c, 0x01, 0xd5, 0xb5, 0x1c, 0xef, 0xf4, 0x05, 0x15, 0x4e, 0x9f, 0xea, 0x60, 0xfb, - 0x9b, 0x78, 0x2e, 0xc7, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x06, 0x4c, 0x8a, 0xb0, - 0x0b, 0xf1, 0x30, 0x9d, 0x8e, 0xfd, 0xa3, 0x70, 0x02, 0x86, 0x53, 0xd8, 0x3c, 0xd9, 0x9b, 0x73, - 0xbb, 0x41, 0xc4, 0xd1, 0x77, 0xcd, 0xf5, 0xb6, 0xc2, 0xe9, 0x33, 0xec, 0x7c, 0x10, 0xc9, 0xde, - 0x92, 0x50, 0x9c, 0x51, 0x03, 0xad, 0xc1, 0x64, 0x2b, 0x20, 0xa4, 0xc9, 0xde, 0x49, 0xe2, 0x3e, - 0x2b, 0xf1, 0xc0, 0x4a, 0xb4, 0x27, 0x95, 0x04, 0x6c, 0x3f, 0xa3, 0x0c, 0xa7, 0x28, 0xa0, 0x3b, - 0x30, 0xe4, 0x6f, 0x93, 0x60, 0x93, 0x38, 0xf5, 0xe9, 0xb3, 0x1d, 0xbc, 0xf6, 0xc4, 0xe5, 0x76, - 0x5d, 0xe0, 0x26, 0x8c, 0x9a, 0x64, 0x71, 0x77, 0xa3, 0x26, 0xd9, 0x18, 0xfa, 0x4f, 0x2c, 0x38, - 0x29, 0xd5, 0x84, 0xd5, 0x16, 0x1d, 0xf5, 0x05, 0xdf, 0x0b, 0xa3, 0x80, 0x87, 0x02, 0x7a, 0x34, - 0x3f, 0x3c, 0xce, 0x5a, 0x4e, 0x25, 0xa5, 0x45, 0x38, 0x99, 0x87, 0x11, 0xe2, 0xfc, 0x16, 0xe9, - 0xcb, 0x3e, 0x24, 0x91, 0x3c, 0x8c, 0xe6, 0xc2, 0xe5, 0xb7, 0x16, 0x57, 0xa7, 0x1f, 0xe3, 0x71, - 0x8c, 0xe8, 0x66, 0xa8, 0x26, 0x81, 0x38, 0x8d, 0x8f, 0x2e, 0x42, 0xc1, 0x0f, 0xa7, 0x1f, 0x67, - 0x6b, 0xfb, 0x64, 0xce, 0x38, 0x5e, 0xaf, 0x72, 0xe3, 0xd6, 0xeb, 0x55, 0x5c, 0xf0, 0x43, 0x99, - 0x70, 0x8d, 0x3e, 0x67, 0xc3, 0xe9, 0x27, 0xb8, 0xcc, 0x59, 0x26, 0x5c, 0x63, 0x85, 0x38, 0x86, - 0xa3, 0x4d, 0x98, 0x08, 0x0d, 0xb1, 0x41, 0x38, 0x7d, 0x8e, 0x8d, 0xd4, 0x13, 0x79, 0x93, 0x66, - 0x60, 0x6b, 0x99, 0x90, 0x4c, 0x2a, 0x38, 0x49, 0x96, 0xef, 0x2e, 0x4d, 0x70, 0x11, 0x4e, 0x3f, - 0xd9, 0x65, 0x77, 0x69, 0xc8, 0xfa, 0xee, 0xd2, 0x69, 0xe0, 0x04, 0x4d, 0x74, 0x43, 0x77, 0x89, - 0x3c, 0x9f, 0x6f, 0x28, 0x99, 0xe9, 0x0c, 0x39, 0x96, 0xe7, 0x08, 0x39, 0xf3, 0x7d, 0x30, 0x95, - 0xe2, 0xc2, 0x0e, 0xe2, 0x1f, 0x32, 0xb3, 0x05, 0x63, 0xc6, 0x4a, 0x7f, 0xa8, 0xe6, 0x43, 0x3f, - 0x03, 0x30, 0xac, 0xcc, 0x3a, 0x72, 0xf4, 0x6c, 0x53, 0xf7, 0xa5, 0x67, 0xbb, 0x60, 0x5a, 0x1f, - 0x9d, 0x4c, 0x5a, 0x1f, 0x0d, 0x55, 0xfc, 0xba, 0x61, 0x70, 0xb4, 0x96, 0x11, 0x41, 0x38, 0xef, - 0x8c, 0xee, 0xdd, 0x21, 0x4e, 0x53, 0x55, 0x15, 0x7b, 0x36, 0x63, 0xea, 0xeb, 0xa8, 0xfd, 0xba, - 0x0c, 0x53, 0x9e, 0xcf, 0x9e, 0x11, 0xa4, 0x2e, 0x79, 0x44, 0xc6, 0x0a, 0x0e, 0xeb, 0x11, 0xee, - 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x79, 0xb9, 0xa4, 0xba, 0x8d, 0xb3, 0x7a, 0x58, 0x40, - 0xe9, 0xf3, 0x95, 0xff, 0x0a, 0xa7, 0x27, 0xf3, 0x9f, 0xaf, 0xbc, 0x52, 0x92, 0x5f, 0x0c, 0x25, - 0xbf, 0xc8, 0xb4, 0x4b, 0x2d, 0xbf, 0x5e, 0xae, 0x88, 0x97, 0x88, 0x16, 0xdb, 0xbf, 0x5e, 0xae, - 0x60, 0x0e, 0x43, 0x73, 0x30, 0xc0, 0x7e, 0xc8, 0xc8, 0x41, 0x79, 0x27, 0x49, 0xb9, 0xa2, 0xe5, - 0xa4, 0x65, 0x15, 0xb0, 0xa8, 0xc8, 0xb4, 0x07, 0xf4, 0xf9, 0xc6, 0xb4, 0x07, 0x83, 0xf7, 0xa9, - 0x3d, 0x90, 0x04, 0x70, 0x4c, 0x0b, 0xdd, 0x85, 0x63, 0xc6, 0x93, 0x59, 0x79, 0x08, 0x42, 0xbe, - 0x91, 0x42, 0x02, 0x79, 0xfe, 0xb4, 0xe8, 0xf4, 0xb1, 0x72, 0x16, 0x25, 0x9c, 0xdd, 0x00, 0x6a, - 0xc0, 0x54, 0x2d, 0xd5, 0xea, 0x50, 0xef, 0xad, 0xaa, 0x75, 0x91, 0x6e, 0x31, 0x4d, 0x18, 0xbd, - 0x0a, 0x43, 0xef, 0xfa, 0xdc, 0xa0, 0x50, 0xbc, 0x9e, 0x64, 0x7c, 0x9b, 0xa1, 0xb7, 0xae, 0x57, - 0x59, 0xf9, 0xfe, 0x6e, 0x69, 0xa4, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x2a, 0xa0, 0x1f, 0xb3, 0x60, - 0x26, 0xfd, 0x26, 0x57, 0x9d, 0x1e, 0xeb, 0xbd, 
0xd3, 0xb6, 0x68, 0x74, 0x66, 0x29, 0x97, 0x1c, - 0xee, 0xd0, 0x14, 0xfa, 0x28, 0xdd, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x84, 0xfe, 0x8f, 0xc6, 0xfb, - 0x89, 0x96, 0xee, 0xef, 0x96, 0x26, 0xf8, 0xe1, 0xed, 0xde, 0x53, 0x59, 0x08, 0x78, 0x05, 0xf4, - 0x83, 0x70, 0x2c, 0x48, 0xcb, 0xc8, 0x89, 0x7c, 0x27, 0x3c, 0xdd, 0xcb, 0x45, 0x90, 0x9c, 0x70, - 0x9c, 0x45, 0x10, 0x67, 0xb7, 0x63, 0xff, 0xa1, 0xc5, 0x74, 0x23, 0xa2, 0x5b, 0x24, 0x6c, 0x37, - 0xa2, 0x43, 0x30, 0xe2, 0x5b, 0x32, 0x6c, 0x13, 0xee, 0xdb, 0x0a, 0xef, 0xbf, 0xb3, 0x98, 0x15, - 0xde, 0x21, 0xfa, 0x13, 0xbe, 0x05, 0x43, 0x91, 0x68, 0x4d, 0x74, 0x3d, 0xcf, 0x62, 0x48, 0x76, - 0x8a, 0x59, 0x22, 0xaa, 0x77, 0x98, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0x5f, 0xf3, 0x19, 0x90, 0x90, - 0x43, 0x50, 0x01, 0x2f, 0x9a, 0x2a, 0xe0, 0x52, 0x97, 0x2f, 0xc8, 0x51, 0x05, 0xff, 0x57, 0x66, - 0xbf, 0x99, 0xfc, 0xf1, 0xfd, 0x6e, 0xfe, 0x69, 0x7f, 0xd1, 0x02, 0x88, 0xd3, 0xbe, 0xf4, 0x90, - 0xc0, 0xfb, 0x12, 0x7d, 0x79, 0xf9, 0x91, 0x5f, 0xf3, 0x1b, 0x42, 0x05, 0x75, 0x2a, 0xd6, 0x42, - 0xf3, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0xe3, 0x30, 0x17, 0x63, 0xbb, 0x08, 0x23, - 0x06, 0xf3, 0x57, 0x2c, 0x38, 0x9a, 0xe5, 0x9c, 0x42, 0xdf, 0xf1, 0x5c, 0x12, 0xab, 0x4c, 0x73, - 0xd5, 0x6c, 0xde, 0x14, 0xe5, 0x58, 0x61, 0xf4, 0x9c, 0x19, 0xfd, 0x60, 0x29, 0x49, 0xae, 0xc3, - 0x58, 0x25, 0x20, 0x1a, 0x7f, 0xf1, 0x7a, 0x9c, 0x2d, 0x69, 0x78, 0xfe, 0xd9, 0x03, 0x47, 0x7c, - 0xb2, 0xbf, 0x5a, 0x80, 0xa3, 0xdc, 0xc0, 0x6c, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf, 0x2e, 0x5c, - 0x8a, 0xdf, 0x86, 0xd1, 0x96, 0x26, 0x3e, 0xef, 0x14, 0x5e, 0x5f, 0x17, 0xb3, 0xc7, 0x02, 0x3f, - 0xbd, 0x14, 0x1b, 0xb4, 0x50, 0x1d, 0x46, 0xc9, 0xb6, 0x5b, 0x53, 0x96, 0x45, 0x85, 0x03, 0x5f, - 0xd2, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0xf6, 0x6c, 0x16, 0xae, 0xb1, 0x68, 0x7d, 0x5d, - 0xac, 0x89, 0x7e, 0xce, 0x82, 0x13, 0x39, 0xc1, 0xf8, 0x69, 0x73, 0x77, 0x98, 0x29, 0x9f, 0x58, - 0xb6, 0xaa, 0x39, 0x6e, 0xe0, 0x87, 0x05, 0x14, 0x7d, 0x1c, 0xa0, 0x15, 0xa7, 0x30, 0xed, 0x12, - 0xb5, 0xdc, 0x88, 0x5f, 0xac, 0x85, 0xa2, 0x55, 0x99, 0x4e, 0x35, 0x5a, 0xf6, 0x57, 0xfa, 0xa0, - 0x9f, 0x19, 0x71, 0xa1, 0x0a, 0x0c, 0x6e, 0xf2, 0x48, 0x89, 0x1d, 0xe7, 0x8d, 0xe2, 0xca, 0xd0, - 0x8b, 0xf1, 0xbc, 0x69, 0xa5, 0x58, 0x92, 0x41, 0x2b, 0x70, 0x84, 0xa7, 0x67, 0x6d, 0x2c, 0x92, - 0x86, 0xb3, 0x23, 0x25, 0xd3, 0x05, 0xf6, 0xa9, 0x4a, 0x42, 0x5f, 0x4e, 0xa3, 0xe0, 0xac, 0x7a, - 0xe8, 0x75, 0x18, 0x8f, 0xdc, 0x26, 0xf1, 0xdb, 0x91, 0xa4, 0xc4, 0xf3, 0xa1, 0xaa, 0xc7, 0xd3, - 0x9a, 0x01, 0xc5, 0x09, 0x6c, 0xf4, 0x2a, 0x8c, 0xb5, 0x52, 0x32, 0xf8, 0xfe, 0x58, 0x58, 0x65, - 0xca, 0xdd, 0x4d, 0x5c, 0xe6, 0x9f, 0xd2, 0x66, 0xde, 0x38, 0x6b, 0x9b, 0x01, 0x09, 0x37, 0xfd, - 0x46, 0x9d, 0x71, 0xc0, 0xfd, 0x9a, 0x7f, 0x4a, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, 0x75, 0xc7, - 0x6d, 0xb4, 0x03, 0x12, 0x53, 0x19, 0x30, 0xa9, 0x2c, 0x27, 0xe0, 0x38, 0x55, 0xa3, 0xbb, 0x72, - 0x61, 0xf0, 0xc1, 0x28, 0x17, 0xec, 0x5f, 0x2d, 0x80, 0x31, 0xb5, 0xdf, 0xc3, 0xd9, 0x56, 0x5f, - 0x83, 0xbe, 0x8d, 0xa0, 0x55, 0x13, 0x06, 0x8b, 0x99, 0x5f, 0x76, 0x19, 0x57, 0x16, 0xf4, 0x2f, - 0xa3, 0xff, 0x31, 0xab, 0x45, 0xf7, 0xf8, 0xb1, 0x4a, 0xe0, 0xd3, 0x4b, 0x4e, 0x06, 0x53, 0x55, - 0x6e, 0x60, 0x83, 0xf2, 0xbd, 0xde, 0x21, 0xec, 0xb8, 0xf0, 0x65, 0xe1, 0x14, 0x0c, 0xdb, 0xbe, - 0xaa, 0x78, 0xad, 0x4b, 0x2a, 0xe8, 0x22, 0x8c, 0x88, 0x04, 0x98, 0xcc, 0x5b, 0x89, 0x6f, 0x26, - 0x66, 0x8b, 0xb8, 0x18, 0x17, 0x63, 0x1d, 0xc7, 0xfe, 0xf1, 0x02, 0x1c, 0xc9, 0x70, 0x37, 0xe5, - 0xd7, 0xc8, 0x86, 0x1b, 0x46, 0xc1, 0x4e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 
0x2b, 0x0c, 0x7a, 0x56, - 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0x77, 0x2e, 0x01, 0x3d, 0xd8, 0xe5, 0x44, 0xaf, 0xed, 0x76, - 0x48, 0x64, 0x86, 0x03, 0x75, 0x6d, 0x33, 0xc3, 0x05, 0x06, 0xa1, 0x4f, 0xc0, 0x0d, 0xa5, 0x8d, - 0xd7, 0x9e, 0x80, 0x5c, 0x1f, 0xcf, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, 0x12, 0x0f, 0xc5, - 0x38, 0xf2, 0x35, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0x27, 0x73, 0x1d, 0xd0, 0x69, 0xd7, - 0x9b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xf2, 0xe4, 0xd1, 0xae, 0x49, 0x6b, 0x73, 0x45, 0x94, 0x63, - 0x85, 0x81, 0xce, 0x41, 0x3f, 0x93, 0xdb, 0x27, 0x93, 0xdf, 0xe1, 0xf9, 0x45, 0x1e, 0x0b, 0x94, - 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf1, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x23, 0x79, 0xa1, 0xd0, - 0xee, 0xfa, 0x7e, 0x03, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0xaa, 0x11, 0x3b, 0x75, 0x3f, - 0xd4, 0x06, 0xed, 0x29, 0x18, 0xdc, 0x22, 0x3b, 0x81, 0xeb, 0x6d, 0x24, 0xad, 0x5d, 0xaf, 0xf2, - 0x62, 0x2c, 0xe1, 0x66, 0x96, 0xf8, 0xc1, 0x07, 0x91, 0x25, 0x5e, 0x5f, 0x01, 0x43, 0x5d, 0xd9, - 0x93, 0x9f, 0x28, 0xc2, 0x04, 0x9e, 0x5f, 0xfc, 0x60, 0x22, 0x6e, 0xa4, 0x27, 0xe2, 0x41, 0x24, - 0x53, 0x3f, 0xd8, 0x6c, 0xfc, 0x9e, 0x05, 0x13, 0x2c, 0x0d, 0xa7, 0x88, 0x1e, 0xe3, 0xfa, 0xde, - 0x21, 0x3c, 0x05, 0x1e, 0x83, 0xfe, 0x80, 0x36, 0x2a, 0x66, 0x50, 0xed, 0x71, 0xd6, 0x13, 0xcc, - 0x61, 0xe8, 0x14, 0xf4, 0xb1, 0x2e, 0xd0, 0xc9, 0x1b, 0xe5, 0x47, 0xf0, 0xa2, 0x13, 0x39, 0x98, - 0x95, 0xb2, 0x38, 0x96, 0x98, 0xb4, 0x1a, 0x2e, 0xef, 0x74, 0x6c, 0x55, 0xf1, 0xfe, 0x08, 0x4d, - 0x93, 0xd9, 0xb5, 0xf7, 0x16, 0xc7, 0x32, 0x9b, 0x64, 0xe7, 0x67, 0xf6, 0x3f, 0x15, 0xe0, 0x4c, - 0x66, 0xbd, 0x9e, 0xe3, 0x58, 0x76, 0xae, 0xfd, 0x30, 0x93, 0xf6, 0x15, 0x0f, 0xd1, 0x97, 0xa0, - 0xaf, 0x57, 0xee, 0xbf, 0xbf, 0x87, 0xf0, 0x92, 0x99, 0x43, 0xf6, 0x3e, 0x09, 0x2f, 0x99, 0xd9, - 0xb7, 0x1c, 0x31, 0xc1, 0xb7, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0xe7, 0xe9, 0x39, 0xc3, 0x80, - 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0x73, 0x30, 0xd1, 0x74, 0x3d, 0x7a, - 0xf8, 0xec, 0x98, 0xac, 0xb8, 0x52, 0xb7, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, 0xae, 0x16, 0x7a, - 0x92, 0x7f, 0xdd, 0xab, 0x07, 0xda, 0x75, 0xb3, 0xa6, 0xc5, 0x89, 0x1a, 0xc5, 0x8c, 0x30, 0x94, - 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbb, 0x9c, 0x68, 0x34, 0x5b, 0x46, 0x34, 0xf3, 0x2a, 0x8c, 0xdd, - 0xb7, 0x9e, 0xc5, 0xfe, 0x66, 0x11, 0x1e, 0xe9, 0xb0, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76, - 0xd6, 0xa7, 0xe6, 0xa1, 0x02, 0x47, 0xd7, 0xdb, 0x8d, 0xc6, 0x0e, 0x73, 0xc0, 0x23, 0x75, 0x89, - 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xba, 0x9c, 0x81, 0x83, 0x33, 0x6b, 0xd2, 0x27, 0x16, 0xbd, - 0x49, 0x76, 0x14, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xe5, 0x6c, - 0x3b, 0x2e, 0x4f, 0x7a, 0x22, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x25, 0x11, 0x70, 0xba, - 0x4e, 0x8e, 0x4a, 0xa8, 0x78, 0x5f, 0x2a, 0x21, 0x33, 0x08, 0xe2, 0x40, 0x7e, 0x10, 0xc4, 0xce, - 0xe7, 0x62, 0xd7, 0x7c, 0x91, 0xef, 0xc0, 0xd8, 0x41, 0x2d, 0xc7, 0x9f, 0x82, 0xc1, 0x40, 0x64, - 0xe2, 0x4f, 0x78, 0xbb, 0xcb, 0x3c, 0xe5, 0x12, 0x6e, 0xff, 0x6f, 0x16, 0x28, 0x59, 0xb2, 0x19, - 0xef, 0xfc, 0x55, 0x66, 0x06, 0xcf, 0xa5, 0xe0, 0x5a, 0x88, 0xb3, 0x63, 0x9a, 0x19, 0x7c, 0x0c, - 0xc4, 0x26, 0x2e, 0x5f, 0x6e, 0x61, 0x1c, 0x59, 0xc3, 0x78, 0x40, 0x08, 0x0d, 0xa4, 0xc2, 0x40, - 0x9f, 0x80, 0xc1, 0xba, 0xbb, 0xed, 0x86, 0x42, 0x8e, 0x76, 0x60, 0x1d, 0x60, 0xfc, 0x7d, 0x8b, - 0x9c, 0x0c, 0x96, 0xf4, 0xec, 0x9f, 0xb2, 0x40, 0xa9, 0x4e, 0xaf, 0x10, 0xa7, 0x11, 0x6d, 0xa2, - 0x37, 0x00, 0x24, 0x05, 0x25, 0x7b, 0x93, 0x06, 0x5d, 0x80, 0x15, 0x64, 0xdf, 0xf8, 0x87, 0xb5, - 
0x3a, 0xe8, 0x75, 0x18, 0xd8, 0x64, 0xb4, 0xc4, 0xb7, 0x9d, 0x53, 0xaa, 0x2e, 0x56, 0xba, 0xbf, - 0x5b, 0x3a, 0x6a, 0xb6, 0x29, 0x6f, 0x31, 0x5e, 0xcb, 0xfe, 0x89, 0x42, 0x3c, 0xa7, 0x6f, 0xb5, - 0xfd, 0xc8, 0x39, 0x04, 0x4e, 0xe4, 0xb2, 0xc1, 0x89, 0x3c, 0xd1, 0x49, 0x37, 0xcc, 0xba, 0x94, - 0xcb, 0x81, 0x5c, 0x4f, 0x70, 0x20, 0x4f, 0x76, 0x27, 0xd5, 0x99, 0xf3, 0xf8, 0x6f, 0x2c, 0x98, - 0x32, 0xf0, 0x0f, 0xe1, 0x02, 0x5c, 0x36, 0x2f, 0xc0, 0x47, 0xbb, 0x7e, 0x43, 0xce, 0xc5, 0xf7, - 0xa3, 0xc5, 0x44, 0xdf, 0xd9, 0x85, 0xf7, 0x2e, 0xf4, 0x6d, 0x3a, 0x41, 0x5d, 0xbc, 0xeb, 0x2f, - 0xf4, 0x34, 0xd6, 0xb3, 0x57, 0x9c, 0x40, 0x18, 0x83, 0x3c, 0x2b, 0x47, 0x9d, 0x16, 0x75, 0x35, - 0x04, 0x61, 0x4d, 0xa1, 0x4b, 0x30, 0x10, 0xd6, 0xfc, 0x96, 0xf2, 0x29, 0x64, 0x49, 0xd4, 0xab, - 0xac, 0x64, 0x7f, 0xb7, 0x84, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x6d, 0x18, 0x63, 0xbf, - 0x94, 0x65, 0x66, 0x31, 0x5f, 0x02, 0x53, 0xd5, 0x11, 0xb9, 0xd9, 0xb2, 0x51, 0x84, 0x4d, 0x52, - 0x33, 0x1b, 0x30, 0xac, 0x3e, 0xeb, 0xa1, 0x6a, 0xfe, 0xff, 0xba, 0x08, 0x47, 0x32, 0xd6, 0x1c, - 0x0a, 0x8d, 0x99, 0xb8, 0xd8, 0xe3, 0x52, 0x7d, 0x8f, 0x73, 0x11, 0xb2, 0x07, 0x60, 0x5d, 0xac, - 0xad, 0x9e, 0x1b, 0xbd, 0x11, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7b, 0xa3, 0xb4, 0xb1, 0x43, 0x1b, - 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0xe9, 0x83, 0xa3, 0x59, 0xe6, 0x2a, 0xe8, - 0x73, 0x30, 0xc0, 0x9c, 0xde, 0xa4, 0xe0, 0xec, 0xc5, 0x5e, 0x0d, 0x5d, 0x66, 0x99, 0xdf, 0x9c, - 0x08, 0x99, 0x3b, 0x2b, 0x8f, 0x23, 0x5e, 0xd8, 0x75, 0x98, 0x45, 0x9b, 0x2c, 0x94, 0x95, 0xb8, - 0x3d, 0xe5, 0xf1, 0xf1, 0x91, 0x9e, 0x3b, 0x20, 0xee, 0xdf, 0x30, 0x61, 0xf5, 0x25, 0x8b, 0xbb, - 0x5b, 0x7d, 0xc9, 0x96, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0x9c, 0xa8, 0xd8, 0xfd, 0x08, 0xe3, 0xb6, - 0x44, 0xea, 0x00, 0x16, 0x36, 0x44, 0x82, 0xc0, 0x8c, 0x0b, 0x23, 0xda, 0xc0, 0x3c, 0xd4, 0xc5, - 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xea, 0x02, 0xfa, 0x59, 0xed, 0xee, 0x17, 0xe7, 0xc1, - 0x87, 0x0d, 0xde, 0xe9, 0x54, 0xc2, 0x15, 0x31, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x35, 0x63, 0xcd, - 0xe7, 0x26, 0xcc, 0x32, 0x2f, 0xfc, 0xce, 0xf1, 0xe5, 0xed, 0x9f, 0xb3, 0x20, 0xe1, 0x2c, 0xa6, - 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x2c, 0xf4, 0x05, 0x7e, 0x43, 0xf2, 0x53, 0x0a, 0x03, 0xfb, - 0x0d, 0x82, 0x19, 0x84, 0x62, 0x44, 0xb1, 0x10, 0x6b, 0x54, 0x7f, 0xa0, 0x8b, 0xa7, 0xf7, 0x63, - 0xd0, 0xdf, 0x20, 0xdb, 0xa4, 0x91, 0xcc, 0x1b, 0x7b, 0x8d, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0xaf, - 0x0f, 0x4e, 0x77, 0x8c, 0x78, 0x47, 0x19, 0xcc, 0x0d, 0x27, 0x22, 0x77, 0x9c, 0x9d, 0x64, 0xbe, - 0xc4, 0xcb, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0xdc, 0xe6, 0x39, 0x80, 0x12, 0xc2, 0x61, 0x91, 0xfa, - 0x47, 0x40, 0x4d, 0x61, 0x63, 0xf1, 0x41, 0x08, 0x1b, 0x9f, 0x07, 0x08, 0xc3, 0x06, 0xb7, 0x09, - 0xad, 0x0b, 0x8f, 0xf0, 0x38, 0x57, 0x54, 0xf5, 0x9a, 0x80, 0x60, 0x0d, 0x0b, 0x2d, 0xc2, 0x64, - 0x2b, 0xf0, 0x23, 0x2e, 0x6b, 0x5f, 0xe4, 0x66, 0xd3, 0xfd, 0x66, 0xb0, 0xb1, 0x4a, 0x02, 0x8e, - 0x53, 0x35, 0xd0, 0x4b, 0x30, 0x22, 0x02, 0x90, 0x55, 0x7c, 0xbf, 0x21, 0xc4, 0x7b, 0xca, 0x92, - 0xb8, 0x1a, 0x83, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x02, 0xfc, 0xc1, 0xcc, 0x6a, 0x5c, 0x88, 0xaf, - 0xe1, 0x25, 0x92, 0x15, 0x0c, 0xf5, 0x94, 0xac, 0x20, 0x16, 0x78, 0x0e, 0xf7, 0xac, 0x4f, 0x86, - 0xae, 0x22, 0xc2, 0xaf, 0xf5, 0xc1, 0x11, 0xb1, 0x70, 0x1e, 0xf6, 0x72, 0xb9, 0x91, 0x5e, 0x2e, - 0x0f, 0x42, 0x24, 0xfa, 0xc1, 0x9a, 0x39, 0xec, 0x35, 0xf3, 0x93, 0x16, 0x98, 0x3c, 0x24, 0xfa, - 0x8f, 0x72, 0x13, 0xce, 0xbe, 0x94, 0xcb, 0x93, 0xc6, 0x91, 0xcc, 0xdf, 0x5b, 0xea, 0x59, 0xfb, - 0x7f, 0xb1, 0xe0, 0xd1, 
0xae, 0x14, 0xd1, 0x12, 0x0c, 0x33, 0x46, 0x57, 0x7b, 0x17, 0x3f, 0xa9, - 0xdc, 0x2a, 0x24, 0x20, 0x87, 0xef, 0x8e, 0x6b, 0xa2, 0xa5, 0x54, 0x66, 0xdf, 0xa7, 0x32, 0x32, - 0xfb, 0x1e, 0x33, 0x86, 0xe7, 0x3e, 0x53, 0xfb, 0x7e, 0x89, 0xde, 0x38, 0xa6, 0x6f, 0xe6, 0x47, - 0x0c, 0x71, 0xae, 0x9d, 0x10, 0xe7, 0x22, 0x13, 0x5b, 0xbb, 0x43, 0xde, 0x80, 0x49, 0x16, 0x99, - 0x94, 0x39, 0xf9, 0x08, 0xa7, 0xce, 0x42, 0x6c, 0xc8, 0x7f, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb, - 0x1f, 0x8a, 0x30, 0xc0, 0xb7, 0xdf, 0x21, 0x3c, 0x7c, 0x9f, 0x81, 0x61, 0xb7, 0xd9, 0x6c, 0xf3, - 0x64, 0xad, 0xfd, 0xb1, 0x59, 0x78, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0x96, 0x85, 0x26, 0xa1, 0x43, - 0xf0, 0x73, 0xde, 0xf1, 0xd9, 0x45, 0x27, 0x72, 0x38, 0x17, 0xa7, 0xee, 0xd9, 0x58, 0xe7, 0x80, - 0x3e, 0x0d, 0x10, 0x46, 0x81, 0xeb, 0x6d, 0xd0, 0x32, 0x91, 0x21, 0xe3, 0xe9, 0x0e, 0xd4, 0xaa, - 0x0a, 0x99, 0xd3, 0x8c, 0xcf, 0x1c, 0x05, 0xc0, 0x1a, 0x45, 0x34, 0x6b, 0xdc, 0xf4, 0x33, 0x89, - 0xb9, 0x03, 0x4e, 0x35, 0x9e, 0xb3, 0x99, 0x97, 0x61, 0x58, 0x11, 0xef, 0x26, 0x57, 0x1c, 0xd5, - 0x19, 0xb6, 0x8f, 0xc1, 0x44, 0xa2, 0x6f, 0x07, 0x12, 0x4b, 0xfe, 0xbe, 0x05, 0x13, 0xbc, 0x33, - 0x4b, 0xde, 0xb6, 0xb8, 0x0d, 0xee, 0xc1, 0xd1, 0x46, 0xc6, 0xa9, 0x2c, 0xa6, 0xbf, 0xf7, 0x53, - 0x5c, 0x89, 0x21, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x74, 0x9e, 0xee, 0x38, 0x7a, 0xea, 0x3a, 0x0d, - 0x11, 0x99, 0x64, 0x94, 0xef, 0x36, 0x5e, 0x86, 0x15, 0xd4, 0xfe, 0x5b, 0x0b, 0xa6, 0x78, 0xcf, - 0xaf, 0x92, 0x1d, 0x75, 0x36, 0x7d, 0x27, 0xfb, 0x2e, 0xd2, 0x84, 0x17, 0x72, 0xd2, 0x84, 0xeb, - 0x9f, 0x56, 0xec, 0xf8, 0x69, 0x5f, 0xb5, 0x40, 0xac, 0x90, 0x43, 0x90, 0xb4, 0x7c, 0x9f, 0x29, - 0x69, 0x99, 0xc9, 0xdf, 0x04, 0x39, 0x22, 0x96, 0x7f, 0xb3, 0x60, 0x92, 0x23, 0xc4, 0x56, 0x10, - 0xdf, 0xd1, 0x79, 0x98, 0x37, 0xbf, 0x28, 0xd3, 0xac, 0xf5, 0x2a, 0xd9, 0x59, 0xf3, 0x2b, 0x4e, - 0xb4, 0x99, 0xfd, 0x51, 0xc6, 0x64, 0xf5, 0x75, 0x9c, 0xac, 0xba, 0xdc, 0x40, 0x46, 0x42, 0xc8, - 0x2e, 0x02, 0xe0, 0x83, 0x26, 0x84, 0xb4, 0xff, 0xd1, 0x02, 0xc4, 0x9b, 0x31, 0x18, 0x37, 0xca, - 0x0e, 0xb1, 0x52, 0xed, 0xa2, 0x8b, 0x8f, 0x26, 0x05, 0xc1, 0x1a, 0xd6, 0x03, 0x19, 0x9e, 0x84, - 0x29, 0x4b, 0xb1, 0xbb, 0x29, 0xcb, 0x01, 0x46, 0xf4, 0xab, 0x83, 0x90, 0x74, 0xeb, 0x44, 0x37, - 0x61, 0xb4, 0xe6, 0xb4, 0x9c, 0xdb, 0x6e, 0xc3, 0x8d, 0x5c, 0x12, 0x76, 0xb2, 0x73, 0x5b, 0xd0, - 0xf0, 0x84, 0xf1, 0x81, 0x56, 0x82, 0x0d, 0x3a, 0x68, 0x16, 0xa0, 0x15, 0xb8, 0xdb, 0x6e, 0x83, - 0x6c, 0x30, 0x81, 0x10, 0x8b, 0x85, 0xc4, 0x8d, 0xee, 0x64, 0x29, 0xd6, 0x30, 0x32, 0x42, 0x90, - 0x14, 0x1f, 0x72, 0x08, 0x12, 0x38, 0xb4, 0x10, 0x24, 0x7d, 0x07, 0x0a, 0x41, 0x32, 0x74, 0xe0, - 0x10, 0x24, 0xfd, 0x3d, 0x85, 0x20, 0xc1, 0x70, 0x5c, 0xf2, 0x9e, 0xf4, 0xff, 0xb2, 0xdb, 0x20, - 0xe2, 0xc1, 0xc1, 0x03, 0x38, 0xcd, 0xec, 0xed, 0x96, 0x8e, 0xe3, 0x4c, 0x0c, 0x9c, 0x53, 0x13, - 0x7d, 0x1c, 0xa6, 0x9d, 0x46, 0xc3, 0xbf, 0xa3, 0x26, 0x75, 0x29, 0xac, 0x39, 0x8d, 0x38, 0xae, - 0xdf, 0xd0, 0xfc, 0xa9, 0xbd, 0xdd, 0xd2, 0xf4, 0x5c, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x83, - 0xe1, 0x56, 0xe0, 0xd7, 0x56, 0x34, 0xdf, 0xf3, 0x33, 0x74, 0x00, 0x2b, 0xb2, 0x70, 0x7f, 0xb7, - 0x34, 0xa6, 0xfe, 0xb0, 0x0b, 0x3f, 0xae, 0x90, 0x11, 0xdd, 0x63, 0xe4, 0x61, 0x47, 0xf7, 0x18, - 0x7d, 0xc0, 0xd1, 0x3d, 0xec, 0x2d, 0x38, 0x52, 0x25, 0x81, 0xeb, 0x34, 0xdc, 0x7b, 0x94, 0x27, - 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x07, 0x89, 0x53, 0xbf, 0xa7, 0xa0, 0xe7, 0x9a, 0x5c, 0x46, 0x9e, - 0xf2, 0x31, 0x21, 0xfb, 0xff, 0xb7, 0x60, 0x50, 0xb8, 0x8a, 0x1e, 0x02, 0x67, 0x3a, 0x67, 0xa8, - 0x64, 0x4a, 0xd9, 0x93, 0xc2, 0x3a, 0x93, 0xab, 
0x8c, 0x29, 0x27, 0x94, 0x31, 0x8f, 0x76, 0x22, - 0xd2, 0x59, 0x0d, 0xf3, 0x9f, 0x15, 0xe9, 0x0b, 0xc1, 0x08, 0x5a, 0xf0, 0xf0, 0x87, 0x60, 0x15, - 0x06, 0x43, 0xe1, 0x34, 0x5f, 0xc8, 0xf7, 0xe5, 0x49, 0x4e, 0x62, 0x6c, 0x03, 0x29, 0xdc, 0xe4, - 0x25, 0x91, 0x4c, 0x6f, 0xfc, 0xe2, 0x43, 0xf4, 0xc6, 0xef, 0x16, 0xd6, 0xa1, 0xef, 0x41, 0x84, - 0x75, 0xb0, 0xbf, 0xce, 0x6e, 0x67, 0xbd, 0xfc, 0x10, 0x18, 0xb7, 0xcb, 0xe6, 0x3d, 0x6e, 0x77, - 0x58, 0x59, 0xa2, 0x53, 0x39, 0x0c, 0xdc, 0xef, 0x5a, 0x70, 0x3a, 0xe3, 0xab, 0x34, 0x6e, 0xee, - 0x59, 0x18, 0x72, 0xda, 0x75, 0x57, 0xed, 0x65, 0x4d, 0x5b, 0x3c, 0x27, 0xca, 0xb1, 0xc2, 0x40, - 0x0b, 0x30, 0x45, 0xee, 0xb6, 0x5c, 0xae, 0x86, 0xd7, 0x4d, 0xc7, 0x8b, 0xdc, 0xbf, 0x78, 0x29, - 0x09, 0xc4, 0x69, 0x7c, 0x15, 0x1a, 0xae, 0x98, 0x1b, 0x1a, 0xee, 0x37, 0x2d, 0x18, 0x51, 0x6e, - 0xe3, 0x0f, 0x7d, 0xb4, 0xdf, 0x30, 0x47, 0xfb, 0x91, 0x0e, 0xa3, 0x9d, 0x33, 0xcc, 0x7f, 0x53, - 0x50, 0xfd, 0xad, 0xf8, 0x41, 0xd4, 0x03, 0x97, 0x78, 0xff, 0x6e, 0x2f, 0x17, 0x61, 0xc4, 0x69, - 0xb5, 0x24, 0x40, 0xda, 0x2f, 0xb2, 0x14, 0x16, 0x71, 0x31, 0xd6, 0x71, 0x94, 0x17, 0x4e, 0x31, - 0xd7, 0x0b, 0xa7, 0x0e, 0x10, 0x39, 0xc1, 0x06, 0x89, 0x68, 0x99, 0x30, 0xb7, 0xce, 0x3f, 0x6f, - 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0xb2, 0x17, 0x5d, 0x0f, 0xf8, 0x33, - 0x55, 0x0b, 0xc0, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x0c, 0x91, 0xc2, 0xda, 0xe8, 0x37, 0x0d, 0x61, - 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0xcb, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0xc0, 0x82, 0xff, - 0x34, 0xaa, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf5, 0xf0, 0x85, 0x9d, 0x0f, 0x7b, 0xda, 0xb0, 0xee, - 0xcf, 0x1a, 0xc7, 0x38, 0x44, 0x9f, 0x4c, 0x19, 0x37, 0x3d, 0xd7, 0xe5, 0xd6, 0x38, 0x80, 0x39, - 0x13, 0xcb, 0x67, 0xc7, 0xb2, 0x7d, 0x95, 0x2b, 0x62, 0x5f, 0x68, 0xf9, 0xec, 0x04, 0x00, 0xc7, - 0x38, 0x94, 0x61, 0x53, 0x7f, 0xc2, 0x69, 0x14, 0x87, 0x3d, 0x57, 0xd8, 0x21, 0xd6, 0x30, 0xd0, - 0x05, 0x21, 0xb4, 0xe0, 0xba, 0x87, 0x47, 0x12, 0x42, 0x0b, 0x39, 0x5c, 0x9a, 0xa4, 0xe9, 0x22, - 0x8c, 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0x41, 0x5b, 0xe8, 0x8f, 0xa3, 0xeb, 0x2e, 0xc5, 0xc5, - 0x58, 0xc7, 0x41, 0x6b, 0x30, 0x11, 0x72, 0x59, 0x9e, 0x4a, 0xb6, 0xc1, 0x65, 0xa2, 0x4f, 0x2b, - 0x87, 0x7d, 0x13, 0xbc, 0xcf, 0x8a, 0xf8, 0xe9, 0x24, 0xc3, 0x98, 0x24, 0x49, 0xa0, 0xd7, 0x61, - 0xbc, 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0x36, 0x3e, 0x43, 0x46, 0x2c, 0xcb, 0xf1, - 0x6b, 0x06, 0x14, 0x27, 0xb0, 0x29, 0x83, 0xa8, 0x97, 0x88, 0x04, 0x31, 0x8e, 0xb7, 0x41, 0xc2, - 0xe9, 0x61, 0xf6, 0x55, 0x8c, 0x41, 0xbc, 0x96, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x4b, 0x30, 0x2a, - 0x3f, 0x5f, 0x8b, 0xfa, 0x13, 0x3b, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x8e, 0xc9, 0xff, - 0x6b, 0x81, 0xb3, 0xbe, 0xee, 0xd6, 0x44, 0x28, 0x0c, 0xee, 0xfc, 0xfd, 0x31, 0xe9, 0x69, 0xba, - 0x94, 0x85, 0xb4, 0xbf, 0x5b, 0x3a, 0x25, 0x46, 0x2d, 0x13, 0x8e, 0xb3, 0x69, 0xa3, 0x15, 0x38, - 0xc2, 0x6d, 0x60, 0x16, 0x36, 0x49, 0x6d, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x57, - 0xd2, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x07, 0xa6, 0x5b, 0xed, 0xdb, 0x0d, 0x37, 0xdc, 0x5c, 0xf5, - 0x23, 0x66, 0x42, 0x36, 0x57, 0xaf, 0x07, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x91, 0x9a, - 0x2a, 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x07, 0xc7, 0x12, 0x0b, 0x41, 0x84, 0x5c, 0x19, 0xcf, - 0x4f, 0xb5, 0x55, 0xcd, 0xaa, 0x20, 0xa2, 0x17, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x15, 0x00, - 0xb7, 0xb5, 0xec, 0x34, 0xdd, 0x06, 0x7d, 0x8e, 0x1e, 0x61, 0x6b, 0x84, 0x3e, 0x4d, 0xa0, 0x5c, - 0x91, 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc1, 
0xb8, 0xf8, 0xb7, 0x23, - 0xa6, 0x74, 0x4a, 0x65, 0x65, 0x1d, 0x97, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, - 0x80, 0xd3, 0x32, 0x25, 0xac, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xbf, 0xd5, 0x10, 0xf7, 0x29, - 0x9a, 0xeb, 0x84, 0x88, 0x3b, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xb1, 0x38, - 0x22, 0xe8, 0xb5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0xc7, 0x5c, 0x2f, 0x6b, 0x55, 0x1f, 0x67, - 0x84, 0x3e, 0xca, 0x9d, 0xe5, 0x3b, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0xca, 0x70, 0x24, - 0xe2, 0x05, 0x8b, 0x6e, 0xc8, 0xd3, 0xe7, 0xd0, 0x67, 0xdf, 0x09, 0xd6, 0xdc, 0x09, 0xba, 0x9a, - 0xd7, 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0xbd, 0x19, 0x80, 0x7e, 0xc3, 0xa2, 0xb5, 0x35, 0x46, 0x1f, - 0x7d, 0x06, 0x46, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xb9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, - 0x41, 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x2d, 0x23, 0xc8, 0xc5, 0x85, 0xde, 0x98, 0xa2, - 0xde, 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x0d, 0x86, 0x6a, 0x0d, 0x97, 0x78, 0x51, 0xb9, - 0xd2, 0x29, 0x50, 0xeb, 0x82, 0xc0, 0x11, 0x5b, 0x51, 0x64, 0xbd, 0xe2, 0x65, 0x58, 0x51, 0xb0, - 0x2f, 0xc1, 0x48, 0xb5, 0x41, 0x48, 0x8b, 0xfb, 0x71, 0xa1, 0xa7, 0xd8, 0xc3, 0x84, 0xb1, 0x96, - 0x16, 0x63, 0x2d, 0xf5, 0x37, 0x07, 0x63, 0x2a, 0x25, 0xdc, 0xfe, 0xb3, 0x02, 0x94, 0xba, 0x24, - 0x5f, 0x4b, 0xe8, 0xdb, 0xac, 0x9e, 0xf4, 0x6d, 0x73, 0x30, 0x11, 0xff, 0xd3, 0x45, 0x79, 0xca, - 0x18, 0xfa, 0xa6, 0x09, 0xc6, 0x49, 0xfc, 0x9e, 0xfd, 0x5a, 0x74, 0x95, 0x5d, 0x5f, 0x57, 0xcf, - 0x2c, 0x43, 0x55, 0xdf, 0xdf, 0xfb, 0xdb, 0x3b, 0x57, 0xed, 0x6a, 0x7f, 0xbd, 0x00, 0xc7, 0xd4, - 0x10, 0x7e, 0xef, 0x0e, 0xdc, 0x8d, 0xf4, 0xc0, 0x3d, 0x00, 0xa5, 0xb5, 0x7d, 0x1d, 0x06, 0x78, - 0xf4, 0xd8, 0x1e, 0x78, 0xfe, 0xc7, 0xcc, 0x40, 0xfe, 0x8a, 0xcd, 0x34, 0x82, 0xf9, 0xff, 0x98, - 0x05, 0x13, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0xfb, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0xb3, - 0xd0, 0xb7, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xc5, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0xdf, 0x59, - 0xd0, 0xbf, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x97, 0xef, 0x42, 0x2f, - 0xc1, 0x00, 0x59, 0x5f, 0x27, 0xb5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0x31, 0xb0, 0xc4, 0x4a, 0x29, - 0x13, 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0xb7, 0x60, 0x38, 0x72, 0x9b, 0x64, 0xae, 0x5e, - 0x17, 0x36, 0x01, 0xf7, 0x11, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0xcb, 0x05, 0x80, - 0x38, 0x5a, 0x5d, 0xb7, 0x4f, 0x9c, 0x4f, 0x69, 0x8b, 0xcf, 0x65, 0x68, 0x8b, 0x51, 0x4c, 0x30, - 0x43, 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd3, 0x30, 0xf5, 0x1d, 0x64, 0x98, 0x16, 0x60, 0x2a, 0x8e, - 0xb6, 0x67, 0x06, 0x1b, 0x65, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0xb3, 0x2a, - 0xe8, 0x98, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x32, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, - 0xab, 0x0e, 0xff, 0x45, 0x0b, 0x8e, 0x26, 0xdb, 0x61, 0x7e, 0xf7, 0x5f, 0xb4, 0xe0, 0x58, 0x9c, - 0x7b, 0x28, 0x6d, 0x82, 0xf0, 0x62, 0xc7, 0x40, 0x6a, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, - 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0x5f, 0x1f, 0x4c, 0xe7, 0x45, 0x60, 0x63, 0x9e, 0x46, 0xce, - 0xdd, 0xea, 0x16, 0xb9, 0x23, 0xfc, 0x39, 0x62, 0x4f, 0x23, 0x5e, 0x8c, 0x25, 0x3c, 0x99, 0x6e, - 0xaa, 0xd0, 0x63, 0xba, 0xa9, 0x4d, 0x98, 0xba, 0xb3, 0x49, 0xbc, 0x1b, 0x5e, 0xe8, 0x44, 0x6e, - 0xb8, 0xee, 0x32, 0x05, 0x3a, 0x5f, 0x37, 0xaf, 0x48, 0xaf, 0x8b, 0x5b, 0x49, 0x84, 0xfd, 0xdd, - 0xd2, 0x69, 0xa3, 0x20, 0xee, 0x32, 0x3f, 0x48, 0x70, 0x9a, 0x68, 0x3a, 0x5b, 0x57, 0xdf, 0x43, - 0xce, 0xd6, 0xd5, 0x74, 0x85, 0xd9, 0x8d, 0x74, 0x23, 0x61, 0xcf, 0xd6, 0x15, 0x55, 0x8a, 0x35, - 
0x0c, 0xf4, 0x29, 0x40, 0x7a, 0xba, 0x45, 0x23, 0x00, 0xee, 0x73, 0x7b, 0xbb, 0x25, 0xb4, 0x9a, - 0x82, 0xee, 0xef, 0x96, 0x8e, 0xd0, 0xd2, 0xb2, 0x47, 0x9f, 0xbf, 0x71, 0xd4, 0xc0, 0x0c, 0x42, - 0xe8, 0x16, 0x4c, 0xd2, 0x52, 0xb6, 0xa3, 0x64, 0x74, 0x5d, 0xfe, 0x64, 0x7d, 0x66, 0x6f, 0xb7, - 0x34, 0xb9, 0x9a, 0x80, 0xe5, 0x91, 0x4e, 0x11, 0xc9, 0x48, 0xda, 0x35, 0xd4, 0x6b, 0xd2, 0x2e, - 0xfb, 0x8b, 0x16, 0x9c, 0xa4, 0x17, 0x5c, 0xfd, 0x5a, 0x8e, 0x16, 0xdd, 0x69, 0xb9, 0x5c, 0x4f, - 0x23, 0xae, 0x1a, 0x26, 0xab, 0xab, 0x94, 0xb9, 0x96, 0x46, 0x41, 0xe9, 0x09, 0xbf, 0xe5, 0x7a, - 0xf5, 0xe4, 0x09, 0x7f, 0xd5, 0xf5, 0xea, 0x98, 0x41, 0xd4, 0x95, 0x55, 0xcc, 0x8d, 0xd6, 0xff, - 0x35, 0xba, 0x57, 0x69, 0x5f, 0xbe, 0xa3, 0xdd, 0x40, 0xcf, 0xe8, 0x3a, 0x55, 0x61, 0x3e, 0x99, - 0xab, 0x4f, 0xfd, 0x82, 0x05, 0xc2, 0xfb, 0xbd, 0x87, 0x3b, 0xf9, 0x6d, 0x18, 0xdd, 0x4e, 0xa7, - 0xa2, 0x3d, 0x9b, 0x1f, 0x0e, 0x40, 0x24, 0xa0, 0x55, 0x2c, 0xba, 0x91, 0x76, 0xd6, 0xa0, 0x65, - 0xd7, 0x41, 0x40, 0x17, 0x09, 0xd3, 0x6a, 0x74, 0xef, 0xcd, 0xf3, 0x00, 0x75, 0x86, 0xcb, 0xf2, - 0xd3, 0x17, 0x4c, 0x8e, 0x6b, 0x51, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x7a, 0x11, 0x46, 0x64, 0xea, - 0xd3, 0xb6, 0xd7, 0x8b, 0xec, 0x51, 0x67, 0x9c, 0x0a, 0x5d, 0x19, 0xa7, 0x77, 0x60, 0x2a, 0x20, - 0xb5, 0x76, 0x10, 0xba, 0xdb, 0x44, 0x82, 0xc5, 0x26, 0x99, 0xe5, 0xc9, 0x22, 0x12, 0xc0, 0x7d, - 0x16, 0x22, 0x2b, 0x51, 0xc8, 0x94, 0xc6, 0x69, 0x42, 0xe8, 0x02, 0x0c, 0x33, 0xd1, 0x7b, 0x25, - 0x16, 0x08, 0x2b, 0xc1, 0xd7, 0x8a, 0x04, 0xe0, 0x18, 0x87, 0x3d, 0x0e, 0xda, 0xb7, 0x19, 0x7a, - 0xc2, 0x13, 0xbc, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0x8f, 0xc3, 0x24, 0xaf, 0x17, 0xf8, 0x2d, 0x67, - 0x83, 0xab, 0x04, 0xfb, 0x55, 0x78, 0x9d, 0xc9, 0x95, 0x04, 0x6c, 0x7f, 0xb7, 0x74, 0x34, 0x59, - 0xc6, 0xba, 0x9d, 0xa2, 0xc2, 0x2c, 0xff, 0x78, 0x23, 0xf4, 0xce, 0x48, 0x19, 0x0c, 0xc6, 0x20, - 0xac, 0xe3, 0xd9, 0xff, 0x6a, 0xc1, 0x94, 0x36, 0x55, 0x3d, 0xe7, 0xeb, 0x30, 0x06, 0xa9, 0xd0, - 0xc3, 0x20, 0x1d, 0x2c, 0xda, 0x43, 0xe6, 0x0c, 0xf7, 0x3d, 0xa0, 0x19, 0xb6, 0x3f, 0x03, 0x28, - 0x9d, 0x57, 0x17, 0xbd, 0xc9, 0x0d, 0xf9, 0xdd, 0x80, 0xd4, 0x3b, 0x29, 0xfc, 0xf5, 0xc8, 0x39, - 0xd2, 0x73, 0x95, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0xe3, 0x7d, 0x30, 0x99, 0x8c, 0xd5, 0x81, 0xae, - 0xc0, 0x00, 0xe7, 0xd2, 0x05, 0xf9, 0x0e, 0xf6, 0x64, 0x5a, 0x84, 0x0f, 0x9e, 0x4b, 0x87, 0x73, - 0xf7, 0xa2, 0x3e, 0x7a, 0x07, 0x46, 0xea, 0xfe, 0x1d, 0xef, 0x8e, 0x13, 0xd4, 0xe7, 0x2a, 0x65, - 0x71, 0x42, 0x64, 0x0a, 0xa0, 0x16, 0x63, 0x34, 0x3d, 0x6a, 0x08, 0xb3, 0x9d, 0x88, 0x41, 0x58, - 0x27, 0x87, 0xd6, 0x58, 0x7a, 0xa7, 0x75, 0x77, 0x63, 0xc5, 0x69, 0x75, 0xf2, 0xea, 0x5a, 0x90, - 0x48, 0x1a, 0xe5, 0x31, 0x91, 0x03, 0x8a, 0x03, 0x70, 0x4c, 0x08, 0x7d, 0x0e, 0x8e, 0x84, 0x39, - 0x2a, 0xb1, 0xbc, 0x34, 0xeb, 0x9d, 0xb4, 0x44, 0x5c, 0x98, 0x92, 0xa5, 0x3c, 0xcb, 0x6a, 0x06, - 0xdd, 0x05, 0x24, 0x44, 0xcf, 0x6b, 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0xd3, 0x3f, - 0x7d, 0x38, 0x5b, 0x4e, 0x90, 0xc4, 0xd6, 0xda, 0x66, 0xe1, 0x85, 0xd3, 0x18, 0x38, 0xa3, 0x0d, - 0xfb, 0x0b, 0x7d, 0x30, 0x23, 0x13, 0x59, 0x67, 0x78, 0xaf, 0x7c, 0xde, 0x4a, 0xb8, 0xaf, 0xbc, - 0x92, 0x7f, 0xd0, 0x3f, 0x34, 0x27, 0x96, 0x2f, 0xa5, 0x9d, 0x58, 0x5e, 0x3b, 0x60, 0x37, 0x1e, - 0x98, 0x2b, 0xcb, 0xf7, 0xac, 0xff, 0xc9, 0xde, 0x51, 0x30, 0xae, 0x66, 0x84, 0x79, 0xec, 0xf6, - 0x8a, 0x54, 0x1d, 0xe5, 0x3c, 0xff, 0xaf, 0x08, 0x1c, 0xe3, 0xb2, 0x1f, 0x95, 0x11, 0xde, 0xd9, - 0x39, 0xab, 0xe8, 0x50, 0x9a, 0xa4, 0xd9, 0x8a, 0x76, 0x16, 0xdd, 0x40, 0xf4, 0x38, 0x93, 0xe6, - 0x92, 0xc0, 0x49, 0xd3, 
0x94, 0x10, 0xac, 0xe8, 0xa0, 0x6d, 0x98, 0xda, 0x60, 0x11, 0x9f, 0xb4, - 0x9c, 0xd2, 0xe2, 0x5c, 0xc8, 0xdc, 0xb7, 0x97, 0x17, 0x96, 0xf2, 0x13, 0x50, 0xf3, 0xc7, 0x5f, - 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa8, 0x73, 0x27, 0x5c, 0x6a, 0x38, 0x61, 0xe4, 0xd6, - 0xe6, 0x1b, 0x7e, 0x6d, 0xab, 0x1a, 0xf9, 0x81, 0x4c, 0x16, 0x99, 0xf9, 0xf6, 0x9a, 0xbb, 0x55, - 0x4d, 0xe1, 0x1b, 0xcd, 0x4f, 0xef, 0xed, 0x96, 0x8e, 0x66, 0x61, 0xe1, 0xcc, 0xb6, 0xd0, 0x2a, - 0x0c, 0x6e, 0xb8, 0x11, 0x26, 0x2d, 0x5f, 0x9c, 0x16, 0x99, 0x47, 0xe1, 0x65, 0x8e, 0x62, 0xb4, - 0xc4, 0x22, 0x52, 0x09, 0x00, 0x96, 0x44, 0xd0, 0x9b, 0xea, 0x12, 0x18, 0xc8, 0x17, 0xc0, 0xa6, - 0x6d, 0xef, 0x32, 0xaf, 0x81, 0xd7, 0xa1, 0xe8, 0xad, 0x87, 0x9d, 0x62, 0xf1, 0xac, 0x2e, 0x1b, - 0xf2, 0xb3, 0xf9, 0x41, 0xfa, 0x34, 0x5e, 0x5d, 0xae, 0x62, 0x5a, 0x91, 0xb9, 0xbd, 0x86, 0xb5, - 0xd0, 0x15, 0x89, 0xa7, 0x32, 0xbd, 0x80, 0xcb, 0xd5, 0x85, 0x6a, 0xd9, 0xa0, 0xc1, 0xa2, 0x1a, - 0xb2, 0x62, 0xcc, 0xab, 0xa3, 0x9b, 0x30, 0xbc, 0xc1, 0x0f, 0xbe, 0xf5, 0x50, 0x24, 0xb3, 0xcf, - 0xbc, 0x8c, 0x2e, 0x4b, 0x24, 0x83, 0x1e, 0xbb, 0x32, 0x14, 0x08, 0xc7, 0xa4, 0xd0, 0x17, 0x2c, - 0x38, 0xd6, 0x4a, 0x48, 0x50, 0x99, 0xb3, 0x9a, 0x30, 0x53, 0xcb, 0x74, 0x00, 0xa8, 0x64, 0x55, - 0x30, 0x1a, 0x64, 0xea, 0x97, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, 0x1d, 0xe8, 0xe0, 0x76, 0xbd, 0x53, - 0xae, 0xa2, 0x44, 0x60, 0x22, 0x3e, 0xd0, 0x78, 0x7e, 0x11, 0xd3, 0x8a, 0x68, 0x0d, 0x60, 0xbd, - 0x41, 0x44, 0xc4, 0x47, 0x61, 0x14, 0x95, 0x79, 0xfb, 0x2f, 0x2b, 0x2c, 0x41, 0x87, 0xbd, 0x44, - 0xe3, 0x52, 0xac, 0xd1, 0xa1, 0x4b, 0xa9, 0xe6, 0x7a, 0x75, 0x12, 0x30, 0xe5, 0x56, 0xce, 0x52, - 0x5a, 0x60, 0x18, 0xe9, 0xa5, 0xc4, 0xcb, 0xb1, 0xa0, 0xc0, 0x68, 0x91, 0xd6, 0xe6, 0x7a, 0xd8, - 0x29, 0x2b, 0xc6, 0x02, 0x69, 0x6d, 0x26, 0x16, 0x14, 0xa7, 0xc5, 0xca, 0xb1, 0xa0, 0x40, 0xb7, - 0xcc, 0x3a, 0xdd, 0x40, 0x24, 0x98, 0x9e, 0xc8, 0xdf, 0x32, 0xcb, 0x1c, 0x25, 0xbd, 0x65, 0x04, - 0x00, 0x4b, 0x22, 0xe8, 0xd3, 0x26, 0xb7, 0x33, 0xc9, 0x68, 0x3e, 0xd3, 0x85, 0xdb, 0x31, 0xe8, - 0x76, 0xe6, 0x77, 0x5e, 0x81, 0xc2, 0x7a, 0x8d, 0x29, 0xc5, 0x72, 0x74, 0x06, 0xcb, 0x0b, 0x06, - 0x35, 0x16, 0x65, 0x7e, 0x79, 0x01, 0x17, 0xd6, 0x6b, 0x74, 0xe9, 0x3b, 0xf7, 0xda, 0x01, 0x59, - 0x76, 0x1b, 0x44, 0x64, 0xc8, 0xc8, 0x5c, 0xfa, 0x73, 0x12, 0x29, 0xbd, 0xf4, 0x15, 0x08, 0xc7, - 0xa4, 0x28, 0xdd, 0x98, 0x07, 0x3b, 0x92, 0x4f, 0x57, 0xb1, 0x5a, 0x69, 0xba, 0x99, 0x5c, 0xd8, - 0x16, 0x8c, 0x6d, 0x87, 0xad, 0x4d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, 0x89, 0x54, 0x71, 0x53, - 0x20, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, 0xea, 0xc4, 0xb0, 0x49, - 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, 0x19, 0x11, 0xe7, 0xf8, - 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0xf1, 0x2e, 0x83, 0x9d, 0xea, 0x6f, - 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb5, 0xe9, 0x47, 0xbe, 0x97, 0xb8, 0xe4, - 0x4e, 0xe4, 0x5f, 0x34, 0x95, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, 0x33, 0xdb, 0xa2, 0x1f, - 0xd7, 0x92, 0x91, 0x01, 0x45, 0x16, 0x8f, 0xa7, 0x72, 0x02, 0x6b, 0xa6, 0xc3, 0x07, 0xf2, 0x8f, - 0x53, 0x20, 0x1c, 0x93, 0x42, 0x75, 0x18, 0x6f, 0x19, 0x11, 0x67, 0x59, 0x36, 0x92, 0x1c, 0xbe, - 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, 0xf7, 0xb8, 0xab, 0x1f, - 0x4b, 0x56, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, 0x4b, 0x22, 0x74, 0x34, - 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x9c, 0x3f, 0x79, 0x0a, 0xf6, 0x2c, 0x35, 0x91, 0x0c, 0xb3, 0x2e, - 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 
-	[... remainder of old gzipped FileDescriptorProto bytes elided ...]
+	// 16665 bytes of a gzipped FileDescriptorProto
+	[... regenerated gzipped FileDescriptorProto bytes elided ...]
0xf9, 0x60, 0x69, 0xbd, 0xf2, 0x1c, 0x8f, 0xa2, 0x45, 0x37, 0x43, 0x3d, 0x09, 0xc4, + 0x69, 0x7c, 0x74, 0x15, 0x4a, 0x7e, 0x58, 0x79, 0x9e, 0xad, 0xed, 0xb3, 0x39, 0xe3, 0x78, 0xbb, + 0xce, 0x5d, 0xab, 0x6f, 0xd7, 0x71, 0xc9, 0x0f, 0x65, 0xba, 0x3f, 0x7a, 0x9d, 0x0d, 0x2b, 0x2f, + 0x70, 0xc5, 0xba, 0x4c, 0xf7, 0xc7, 0x0a, 0x71, 0x0c, 0x47, 0xdb, 0x30, 0x15, 0x1a, 0x6a, 0x83, + 0xb0, 0x72, 0x89, 0x8d, 0xd4, 0x0b, 0x79, 0x93, 0x66, 0x60, 0x6b, 0x79, 0xb8, 0x4c, 0x2a, 0x38, + 0x49, 0x96, 0xef, 0x2e, 0x4d, 0x71, 0x11, 0x56, 0x5e, 0xec, 0xb1, 0xbb, 0x34, 0x64, 0x7d, 0x77, + 0xe9, 0x34, 0x70, 0x82, 0x26, 0xba, 0xa3, 0x3f, 0xc8, 0xbd, 0x9c, 0xef, 0xa6, 0x9b, 0xf9, 0x14, + 0x77, 0x22, 0xf7, 0x19, 0xee, 0x7b, 0x30, 0x2d, 0xcf, 0x12, 0xba, 0x32, 0x03, 0xb7, 0x49, 0x2a, + 0x2f, 0xc5, 0x9b, 0xf6, 0x46, 0x02, 0x86, 0x53, 0xd8, 0xb3, 0x3f, 0x02, 0x33, 0x29, 0x39, 0xee, + 0x28, 0xef, 0x9b, 0x66, 0x77, 0x60, 0xc2, 0xd8, 0x2b, 0x4f, 0xd7, 0xfd, 0x6d, 0x0c, 0x46, 0x95, + 0x5b, 0x52, 0x8e, 0x39, 0x72, 0xe6, 0xb1, 0xcc, 0x91, 0x57, 0x4c, 0xef, 0xb9, 0xb3, 0x49, 0xef, + 0xb9, 0x91, 0x9a, 0xdf, 0x34, 0x1c, 0xe6, 0x36, 0x32, 0x22, 0x60, 0xe7, 0x71, 0xf9, 0xfe, 0x1f, + 0x74, 0x6a, 0x16, 0xbd, 0x72, 0xdf, 0x6e, 0x78, 0x03, 0x85, 0x46, 0xc2, 0xeb, 0x30, 0xe3, 0xf9, + 0xec, 0x22, 0x42, 0x9a, 0x52, 0xca, 0x64, 0xc2, 0xe4, 0xa8, 0x1e, 0xa1, 0x31, 0x81, 0x80, 0xd3, + 0x75, 0x68, 0x83, 0x5c, 0x1a, 0x4c, 0x5a, 0x25, 0xb9, 0xb0, 0x88, 0x05, 0x94, 0x5e, 0x80, 0xf9, + 0xaf, 0xb0, 0x32, 0x9d, 0x7f, 0x01, 0xe6, 0x95, 0x92, 0x12, 0x67, 0x28, 0x25, 0x4e, 0x66, 0x84, + 0xeb, 0xf8, 0xcd, 0xd5, 0x9a, 0xb8, 0xcb, 0x68, 0xb9, 0x29, 0x9a, 0xab, 0x35, 0xcc, 0x61, 0x68, + 0x1e, 0x86, 0xd8, 0x0f, 0x19, 0xf9, 0x2a, 0x8f, 0x17, 0xad, 0xd6, 0xb4, 0x9c, 0xca, 0xac, 0x02, + 0x16, 0x15, 0x99, 0xfd, 0x81, 0x5e, 0x00, 0x99, 0xfd, 0x61, 0xf8, 0x31, 0xed, 0x0f, 0x92, 0x00, + 0x8e, 0x69, 0xa1, 0x87, 0x70, 0xca, 0xb8, 0x74, 0xab, 0x17, 0xae, 0x90, 0xef, 0x64, 0x93, 0x40, + 0x5e, 0x38, 0x2f, 0x3a, 0x7d, 0x6a, 0x35, 0x8b, 0x12, 0xce, 0x6e, 0x00, 0xb5, 0x60, 0xa6, 0x91, + 0x6a, 0x75, 0xa4, 0xff, 0x56, 0xd5, 0xba, 0x48, 0xb7, 0x98, 0x26, 0x8c, 0xde, 0x86, 0x91, 0x8f, + 0x7c, 0xee, 0x10, 0x2b, 0xee, 0x5f, 0x32, 0x3e, 0xd3, 0xc8, 0x07, 0xb7, 0xeb, 0xac, 0xfc, 0x70, + 0xbf, 0x3a, 0x56, 0xf3, 0x9b, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0x5f, 0xb2, 0x60, 0x36, 0x7d, 0xab, + 0x57, 0x9d, 0x9e, 0xe8, 0xbf, 0xd3, 0xb6, 0x68, 0x74, 0x76, 0x39, 0x97, 0x1c, 0x2e, 0x68, 0x0a, + 0x7d, 0x91, 0xee, 0xa7, 0xd0, 0x7d, 0xc4, 0x5f, 0xb8, 0x68, 0x0e, 0x09, 0x98, 0x95, 0x1e, 0xee, + 0x57, 0xa7, 0x38, 0xfb, 0x77, 0x1f, 0xa9, 0x2c, 0x1a, 0xbc, 0x02, 0xfa, 0x31, 0x38, 0x15, 0xa4, + 0xb5, 0xec, 0x44, 0xde, 0x34, 0x3e, 0xdb, 0xcf, 0x51, 0x92, 0x9c, 0x70, 0x9c, 0x45, 0x10, 0x67, + 0xb7, 0x83, 0xfe, 0xaa, 0x05, 0xcf, 0x90, 0x7c, 0x0b, 0xae, 0xb8, 0x2a, 0xbc, 0x96, 0xd3, 0x8f, + 0x02, 0xdb, 0x2f, 0x4b, 0x30, 0xf0, 0x4c, 0x01, 0x02, 0x2e, 0x6a, 0xd7, 0xfe, 0xc7, 0x16, 0xb3, + 0xfa, 0x08, 0x54, 0x12, 0x76, 0x5b, 0xd1, 0x31, 0x38, 0xc7, 0x2e, 0x1b, 0xae, 0x25, 0x8f, 0xed, + 0xdd, 0xfa, 0xdf, 0x5a, 0xcc, 0xbb, 0xf5, 0x18, 0xdf, 0xe9, 0x7e, 0x00, 0x23, 0x91, 0x68, 0x4d, + 0x74, 0x3d, 0xcf, 0x13, 0x4f, 0x76, 0x8a, 0x79, 0xf8, 0xaa, 0x1b, 0xa6, 0x2c, 0xc5, 0x8a, 0x8c, + 0xfd, 0x5f, 0xf1, 0x19, 0x90, 0x90, 0x63, 0x30, 0x6e, 0x2f, 0x99, 0xc6, 0xed, 0x6a, 0x8f, 0x2f, + 0xc8, 0x31, 0x72, 0xff, 0x03, 0xb3, 0xdf, 0x4c, 0xb3, 0xfa, 0x49, 0x77, 0xab, 0xb6, 0xbf, 0x6b, + 0x01, 0xc4, 0xe9, 0x94, 0xfa, 0x48, 0x8c, 0x7f, 0x8d, 0xde, 0x29, 0xfd, 0xc8, 0x6f, 0xf8, 0x2d, + 0x61, 0x5c, 0x3b, 0x17, 0xdb, 0xd7, 
0x79, 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x54, 0x95, 0xf1, + 0xcd, 0xcb, 0xb1, 0x5b, 0x8b, 0x11, 0xdb, 0xfc, 0x67, 0x2c, 0x38, 0x99, 0xf5, 0xe8, 0x0b, 0xbd, + 0x02, 0x23, 0x5c, 0xc7, 0xac, 0x5c, 0xde, 0xd5, 0x6c, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xf4, 0xeb, + 0xfa, 0x7e, 0xc4, 0x54, 0x3f, 0xb7, 0x61, 0xa2, 0x16, 0x10, 0x4d, 0xee, 0x79, 0x37, 0xce, 0x42, + 0x36, 0xba, 0xf0, 0xca, 0x91, 0x23, 0xa9, 0xd9, 0xbf, 0x54, 0x82, 0x93, 0xdc, 0x71, 0x73, 0x7e, + 0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x29, 0x9e, 0xea, 0x7f, 0x15, 0xc6, 0x3b, 0x9a, 0x61, 0xa0, 0x28, + 0x6d, 0x85, 0x6e, 0x40, 0x88, 0x55, 0x99, 0x7a, 0x29, 0x36, 0x68, 0xa1, 0x26, 0x8c, 0x93, 0x5d, + 0xb7, 0xa1, 0x1c, 0xc3, 0x4a, 0x47, 0x16, 0x1e, 0x54, 0x2b, 0xcb, 0x1a, 0x1d, 0x6c, 0x50, 0xed, + 0xfb, 0xb9, 0x85, 0x26, 0x3a, 0x0e, 0xf4, 0x70, 0x06, 0xfb, 0x59, 0x0b, 0xce, 0xe4, 0x24, 0xb9, + 0xa0, 0xcd, 0x3d, 0x60, 0x2e, 0xb2, 0x62, 0xd9, 0xaa, 0xe6, 0xb8, 0xe3, 0x2c, 0x16, 0x50, 0xf4, + 0x65, 0x80, 0x4e, 0x9c, 0x1a, 0xb8, 0x47, 0x36, 0x00, 0x23, 0x2e, 0xb8, 0x16, 0xe2, 0x59, 0x65, + 0x10, 0xd6, 0x68, 0xd9, 0x3f, 0x33, 0x00, 0x83, 0xcc, 0x07, 0x0f, 0xd5, 0x60, 0x78, 0x9b, 0x47, + 0x20, 0x2d, 0x9c, 0x37, 0x8a, 0x2b, 0x43, 0x9a, 0xc6, 0xf3, 0xa6, 0x95, 0x62, 0x49, 0x06, 0xad, + 0xc1, 0x09, 0x9e, 0xf6, 0xb8, 0xb5, 0x44, 0x5a, 0xce, 0x9e, 0xd4, 0xb9, 0x97, 0xd8, 0xa7, 0x2a, + 0xdb, 0xc3, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc2, 0x64, 0xe4, 0xb6, 0x89, 0xdf, 0x8d, + 0x4c, 0x77, 0x53, 0x75, 0x2d, 0xdc, 0x30, 0xa0, 0x38, 0x81, 0x8d, 0xde, 0x86, 0x89, 0x4e, 0xca, + 0xba, 0x30, 0x18, 0xab, 0xe1, 0x4c, 0x8b, 0x82, 0x89, 0xcb, 0xde, 0x7d, 0x75, 0xd9, 0x2b, 0xb7, + 0x8d, 0xed, 0x80, 0x84, 0xdb, 0x7e, 0xab, 0xc9, 0x24, 0xf3, 0x41, 0xed, 0xdd, 0x57, 0x02, 0x8e, + 0x53, 0x35, 0x28, 0x95, 0x4d, 0xc7, 0x6d, 0x75, 0x03, 0x12, 0x53, 0x19, 0x32, 0xa9, 0xac, 0x24, + 0xe0, 0x38, 0x55, 0xa3, 0xb7, 0xd9, 0x64, 0xf8, 0xc9, 0x98, 0x4d, 0xec, 0xbf, 0x5b, 0x02, 0x63, + 0x6a, 0x7f, 0x88, 0xb3, 0x18, 0xbf, 0x03, 0x03, 0x5b, 0x41, 0xa7, 0x21, 0xfc, 0x4d, 0x33, 0xbf, + 0xec, 0x3a, 0xae, 0x2d, 0xea, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0x53, 0xc2, 0xfb, + 0x5a, 0x06, 0x29, 0x56, 0xcf, 0x2b, 0x87, 0xa5, 0x26, 0xa2, 0x20, 0x9c, 0xbf, 0x78, 0x23, 0xa6, + 0xfc, 0xb7, 0x35, 0x53, 0xb8, 0xd0, 0x43, 0x48, 0x2a, 0xe8, 0x2a, 0x8c, 0x89, 0xc4, 0xb2, 0xec, + 0x15, 0x20, 0xdf, 0x4c, 0xcc, 0x95, 0x74, 0x29, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0x97, 0x4b, 0x70, + 0x22, 0xe3, 0x19, 0x37, 0x3f, 0x46, 0xb6, 0xdc, 0x30, 0x0a, 0xf6, 0x92, 0x87, 0x13, 0x16, 0xe5, + 0x58, 0x61, 0x50, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0x9e, 0x49, 0x0a, 0xe8, 0xd1, 0x0e, + 0x27, 0x7a, 0x6c, 0x77, 0x43, 0x22, 0x33, 0x87, 0xa8, 0x63, 0x9b, 0xb9, 0x64, 0x30, 0x08, 0xbd, + 0x9a, 0x6e, 0x29, 0x3f, 0x03, 0xed, 0x6a, 0xca, 0x3d, 0x0d, 0x38, 0x8c, 0x76, 0x2e, 0x22, 0x9e, + 0xe3, 0x45, 0xe2, 0x02, 0x1b, 0x47, 0x94, 0x67, 0xa5, 0x58, 0x40, 0xed, 0xef, 0x95, 0xe1, 0x6c, + 0x6e, 0x60, 0x07, 0xda, 0xf5, 0xb6, 0xef, 0xb9, 0x91, 0xaf, 0x7c, 0x74, 0x79, 0x14, 0x79, 0xd2, + 0xd9, 0x5e, 0x13, 0xe5, 0x58, 0x61, 0xa0, 0x4b, 0x30, 0xc8, 0x2c, 0x12, 0xc9, 0xa4, 0x92, 0x78, + 0x61, 0x89, 0xc7, 0xd8, 0xe5, 0x60, 0xed, 0x54, 0x2f, 0x17, 0x9e, 0xea, 0xcf, 0x51, 0x09, 0xc6, + 0x6f, 0x25, 0x0f, 0x14, 0xda, 0x5d, 0xdf, 0x6f, 0x61, 0x06, 0x44, 0x2f, 0x88, 0xf1, 0x4a, 0x38, + 0xa5, 0x62, 0xa7, 0xe9, 0x87, 0xda, 0xa0, 0x71, 0x07, 0xf8, 0xc0, 0xf5, 0xb6, 0x92, 0xce, 0xca, + 0x37, 0x79, 0x31, 0x96, 0x70, 0xba, 0x97, 0xe2, 0xdc, 0xf8, 0xc3, 0xf9, 0x7b, 0x49, 0x65, 0xc0, + 0xef, 0x99, 0x16, 0x5f, 0x5f, 0x01, 0x23, 0x3d, 0xc5, 0x93, 
0x9f, 0x2a, 0xc3, 0x14, 0x5e, 0x58, + 0xfa, 0x74, 0x22, 0xee, 0xa4, 0x27, 0xa2, 0x7f, 0xb3, 0xd9, 0x93, 0x9a, 0x8d, 0x7f, 0x68, 0xc1, + 0x14, 0x4b, 0x6f, 0x2b, 0xa2, 0x32, 0xb9, 0xbe, 0x77, 0x0c, 0x57, 0x81, 0xe7, 0x60, 0x30, 0xa0, + 0x8d, 0x8a, 0x19, 0x54, 0x7b, 0x9c, 0xf5, 0x04, 0x73, 0x18, 0x3a, 0x07, 0x03, 0xac, 0x0b, 0x74, + 0xf2, 0xc6, 0x39, 0x0b, 0x5e, 0x72, 0x22, 0x07, 0xb3, 0x52, 0x16, 0x1f, 0x16, 0x93, 0x4e, 0xcb, + 0xe5, 0x9d, 0x8e, 0xfd, 0x45, 0x3e, 0x19, 0x21, 0x9f, 0x32, 0xbb, 0xf6, 0xf1, 0xe2, 0xc3, 0x66, + 0x93, 0x2c, 0xbe, 0x66, 0xff, 0x71, 0x09, 0x2e, 0x64, 0xd6, 0xeb, 0x3b, 0x3e, 0x6c, 0x71, 0xed, + 0xa7, 0x99, 0x0c, 0xb3, 0x7c, 0x8c, 0x4f, 0x41, 0x06, 0xfa, 0x95, 0xfe, 0x07, 0xfb, 0x08, 0xdb, + 0x9a, 0x39, 0x64, 0x9f, 0x90, 0xb0, 0xad, 0x99, 0x7d, 0xcb, 0x51, 0x13, 0xfc, 0x69, 0x29, 0xe7, + 0x5b, 0x98, 0xc2, 0xe0, 0x32, 0xe5, 0x33, 0x0c, 0x18, 0xca, 0x4b, 0x38, 0xe7, 0x31, 0xbc, 0x0c, + 0x2b, 0x28, 0x9a, 0x87, 0xa9, 0xb6, 0xeb, 0x51, 0xe6, 0xb3, 0x67, 0x8a, 0xe2, 0xca, 0x90, 0xb4, + 0x66, 0x82, 0x71, 0x12, 0x1f, 0xb9, 0x5a, 0x48, 0x57, 0xfe, 0x75, 0x6f, 0x1f, 0x69, 0xd7, 0xcd, + 0x99, 0xbe, 0x34, 0x6a, 0x14, 0x33, 0xc2, 0xbb, 0xae, 0x69, 0x7a, 0xa2, 0x72, 0xff, 0x7a, 0xa2, + 0xf1, 0x6c, 0x1d, 0xd1, 0xec, 0xdb, 0x30, 0xf1, 0xd8, 0xf6, 0x1f, 0xfb, 0xfb, 0x65, 0x78, 0xa6, + 0x60, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, 0xc6, 0xeb, 0x53, 0xf3, 0x50, 0x83, 0x93, 0x9b, 0xdd, + 0x56, 0x6b, 0x8f, 0x3d, 0x6c, 0x25, 0x4d, 0x89, 0x21, 0x64, 0x4a, 0xf5, 0xf4, 0x6d, 0x25, 0x03, + 0x07, 0x67, 0xd6, 0xa4, 0x57, 0x2c, 0x7a, 0x92, 0xec, 0x29, 0x52, 0x89, 0x2b, 0x16, 0xd6, 0x81, + 0xd8, 0xc4, 0x45, 0xd7, 0x61, 0xc6, 0xd9, 0x75, 0x5c, 0x9e, 0x4c, 0x48, 0x12, 0xe0, 0x77, 0x2c, + 0xa5, 0x23, 0x9f, 0x4f, 0x22, 0xe0, 0x74, 0x9d, 0x1c, 0x53, 0x55, 0xf9, 0xb1, 0x4c, 0x55, 0x66, + 0x70, 0xd1, 0xa1, 0xfc, 0xe0, 0xa2, 0xc5, 0x7c, 0xb1, 0x67, 0x1e, 0xd6, 0x0f, 0x61, 0xe2, 0xa8, + 0x3e, 0xf1, 0x2f, 0xc1, 0xb0, 0x78, 0xc3, 0x93, 0x7c, 0xaf, 0x29, 0xf3, 0xff, 0x4b, 0xb8, 0xfd, + 0xbf, 0x5a, 0xa0, 0x74, 0xdc, 0x66, 0x1e, 0x81, 0xb7, 0x99, 0x83, 0x3f, 0xd7, 0xce, 0x6b, 0x6f, + 0x45, 0x4f, 0x69, 0x0e, 0xfe, 0x31, 0x10, 0x9b, 0xb8, 0x7c, 0xb9, 0x85, 0x71, 0xc4, 0x1a, 0xe3, + 0x02, 0x21, 0x6c, 0xab, 0x0a, 0x03, 0x7d, 0x05, 0x86, 0x9b, 0xee, 0xae, 0x1b, 0x0a, 0x3d, 0xda, + 0x91, 0x6d, 0x93, 0xf1, 0xf7, 0x2d, 0x71, 0x32, 0x58, 0xd2, 0xb3, 0xff, 0x8a, 0x05, 0xca, 0x28, + 0x7c, 0x83, 0x38, 0xad, 0x68, 0x1b, 0xbd, 0x07, 0x20, 0x29, 0x28, 0xdd, 0x9b, 0x74, 0x55, 0x03, + 0xac, 0x20, 0x87, 0xc6, 0x3f, 0xac, 0xd5, 0x41, 0xef, 0xc2, 0xd0, 0x36, 0xa3, 0x25, 0xbe, 0xed, + 0x92, 0x32, 0xc1, 0xb1, 0xd2, 0xc3, 0xfd, 0xea, 0x49, 0xb3, 0x4d, 0x79, 0x8a, 0xf1, 0x5a, 0xf6, + 0x4f, 0x95, 0xe2, 0x39, 0xfd, 0xa0, 0xeb, 0x47, 0xce, 0x31, 0x48, 0x22, 0xd7, 0x0d, 0x49, 0xe4, + 0x85, 0x22, 0xab, 0x37, 0xeb, 0x52, 0xae, 0x04, 0x72, 0x3b, 0x21, 0x81, 0xbc, 0xd8, 0x9b, 0x54, + 0xb1, 0xe4, 0xf1, 0x5f, 0x5b, 0x30, 0x63, 0xe0, 0x1f, 0xc3, 0x01, 0xb8, 0x62, 0x1e, 0x80, 0xcf, + 0xf6, 0xfc, 0x86, 0x9c, 0x83, 0xef, 0x27, 0xca, 0x89, 0xbe, 0xb3, 0x03, 0xef, 0x23, 0x18, 0xd8, + 0x76, 0x82, 0xa6, 0xb8, 0xd7, 0x5f, 0xe9, 0x6b, 0xac, 0xe7, 0x6e, 0x38, 0x81, 0x70, 0x73, 0x79, + 0x45, 0x8e, 0x3a, 0x2d, 0xea, 0xe9, 0xe2, 0xc2, 0x9a, 0x42, 0xd7, 0x60, 0x28, 0x6c, 0xf8, 0x1d, + 0xf5, 0x24, 0xf4, 0x22, 0x1b, 0x68, 0x56, 0x72, 0xb8, 0x5f, 0x45, 0x66, 0x73, 0xb4, 0x18, 0x0b, + 0x7c, 0xf4, 0x55, 0x98, 0x60, 0xbf, 0x94, 0xcf, 0x69, 0x39, 0x5f, 0x03, 0x53, 0xd7, 0x11, 0xb9, + 0x43, 0xb6, 0x51, 0x84, 0x4d, 0x52, 0xb3, 0x5b, 0x30, 0xaa, 0x3e, 0xeb, 0xa9, 0x7a, 
0x24, 0xfc, + 0x8b, 0x32, 0x9c, 0xc8, 0x58, 0x73, 0x28, 0x34, 0x66, 0xe2, 0x6a, 0x9f, 0x4b, 0xf5, 0x63, 0xce, + 0x45, 0xc8, 0x2e, 0x80, 0x4d, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x4e, 0x48, 0x92, 0x8d, 0xd2, 0xa2, + 0xde, 0x8d, 0xd2, 0xc6, 0x8e, 0x6d, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x4f, 0x75, 0x4e, 0x7f, 0x6b, + 0x00, 0x4e, 0x66, 0x39, 0xe2, 0xa0, 0x1f, 0x85, 0x21, 0xf6, 0x9c, 0xaf, 0xf0, 0xfd, 0x6b, 0x56, + 0xcd, 0x39, 0xf6, 0x22, 0x50, 0x84, 0xa2, 0x9e, 0x93, 0xec, 0x88, 0x17, 0xf6, 0x1c, 0x66, 0xd1, + 0x26, 0x0b, 0x11, 0x27, 0x4e, 0x4f, 0xc9, 0x3e, 0x3e, 0xdf, 0x77, 0x07, 0xc4, 0xf9, 0x1b, 0x26, + 0xfc, 0xd9, 0x64, 0x71, 0x6f, 0x7f, 0x36, 0xd9, 0x32, 0x5a, 0x85, 0xa1, 0x06, 0x77, 0x94, 0x2a, + 0xf7, 0x66, 0x61, 0xdc, 0x4b, 0x4a, 0x31, 0x60, 0xe1, 0x1d, 0x25, 0x08, 0xcc, 0xba, 0x30, 0xa6, + 0x0d, 0xcc, 0x53, 0x5d, 0x3c, 0x3b, 0xf4, 0xe0, 0xd3, 0x86, 0xe0, 0xa9, 0x2e, 0xa0, 0xbf, 0xae, + 0x9d, 0xfd, 0x82, 0x1f, 0x7c, 0xce, 0x90, 0x9d, 0xce, 0x25, 0x1e, 0x59, 0x26, 0xf6, 0x15, 0x93, + 0xa5, 0xea, 0x66, 0x0e, 0x87, 0xdc, 0x44, 0x74, 0xe6, 0x81, 0x5f, 0x9c, 0xb7, 0xc1, 0xfe, 0x59, + 0x0b, 0x12, 0xcf, 0xe0, 0x94, 0xba, 0xd3, 0xca, 0x55, 0x77, 0x5e, 0x84, 0x81, 0xc0, 0x6f, 0x49, + 0x79, 0x4a, 0x61, 0x60, 0xbf, 0x45, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x56, 0x62, 0x8d, 0xeb, 0x17, + 0x74, 0x71, 0xf5, 0x7e, 0x0e, 0x06, 0x5b, 0x64, 0x97, 0xb4, 0x92, 0xf9, 0x98, 0x6f, 0xd1, 0x42, + 0xcc, 0x61, 0xf6, 0x3f, 0x1c, 0x80, 0xf3, 0x85, 0x91, 0x24, 0xa9, 0x80, 0xb9, 0xe5, 0x44, 0xe4, + 0x81, 0xb3, 0x97, 0xcc, 0x43, 0x7a, 0x9d, 0x17, 0x63, 0x09, 0x67, 0xef, 0xee, 0x79, 0x6e, 0xad, + 0x84, 0x72, 0x58, 0xa4, 0xd4, 0x12, 0x50, 0x53, 0xd9, 0x58, 0x7e, 0x12, 0xca, 0xc6, 0xd7, 0x00, + 0xc2, 0xb0, 0xc5, 0xbd, 0x5d, 0x9b, 0xe2, 0x41, 0x7f, 0x1c, 0xe9, 0xa4, 0x7e, 0x4b, 0x40, 0xb0, + 0x86, 0x85, 0x96, 0x60, 0xba, 0x13, 0xf8, 0x11, 0xd7, 0xb5, 0x2f, 0x71, 0x87, 0xf0, 0x41, 0x33, + 0x88, 0x5f, 0x2d, 0x01, 0xc7, 0xa9, 0x1a, 0xe8, 0x4d, 0x18, 0x13, 0x81, 0xfd, 0x6a, 0xbe, 0xdf, + 0x12, 0xea, 0x3d, 0xe5, 0x23, 0x5d, 0x8f, 0x41, 0x58, 0xc7, 0xd3, 0xaa, 0x31, 0x05, 0xfe, 0x70, + 0x66, 0x35, 0xae, 0xc4, 0xd7, 0xf0, 0x12, 0x49, 0x40, 0x46, 0xfa, 0x4a, 0x02, 0x12, 0x2b, 0x3c, + 0x47, 0xfb, 0xb6, 0x27, 0x43, 0x4f, 0x15, 0xe1, 0xaf, 0x0c, 0xc0, 0x09, 0xb1, 0x70, 0x9e, 0xf6, + 0x72, 0xb9, 0x93, 0x5e, 0x2e, 0x4f, 0x42, 0x25, 0xfa, 0xe9, 0x9a, 0x39, 0xee, 0x35, 0xf3, 0xd3, + 0x16, 0x98, 0x32, 0x24, 0xfa, 0x8f, 0x72, 0x13, 0x39, 0xbf, 0x99, 0x2b, 0x93, 0xc6, 0x19, 0x02, + 0x3e, 0x5e, 0x4a, 0x67, 0xfb, 0x7f, 0xb6, 0xe0, 0xd9, 0x9e, 0x14, 0xd1, 0x32, 0x8c, 0x32, 0x41, + 0x57, 0xbb, 0x17, 0xbf, 0xa8, 0x1e, 0x8c, 0x48, 0x40, 0x8e, 0xdc, 0x1d, 0xd7, 0x44, 0xcb, 0xa9, + 0x8c, 0xd9, 0x2f, 0x65, 0x64, 0xcc, 0x3e, 0x65, 0x0c, 0xcf, 0x63, 0xa6, 0xcc, 0xfe, 0x49, 0x7a, + 0xe2, 0x98, 0xaf, 0x4e, 0x3f, 0x6f, 0xa8, 0x73, 0xed, 0x84, 0x3a, 0x17, 0x99, 0xd8, 0xda, 0x19, + 0xf2, 0x1e, 0x4c, 0xb3, 0x88, 0xbf, 0xec, 0xf9, 0x92, 0x78, 0xae, 0x5a, 0x8a, 0xbd, 0x9d, 0x6f, + 0x25, 0x60, 0x38, 0x85, 0x6d, 0xff, 0x9b, 0x32, 0x0c, 0xf1, 0xed, 0x77, 0x0c, 0x17, 0xdf, 0x97, + 0x61, 0xd4, 0x6d, 0xb7, 0xbb, 0x3c, 0x09, 0xf2, 0x60, 0xec, 0xf0, 0xbe, 0x2a, 0x0b, 0x71, 0x0c, + 0x47, 0x2b, 0xc2, 0x92, 0x50, 0x90, 0x54, 0x80, 0x77, 0x7c, 0x6e, 0xc9, 0x89, 0x1c, 0x2e, 0xc5, + 0xa9, 0x73, 0x36, 0xb6, 0x39, 0xa0, 0x6f, 0x00, 0x84, 0x51, 0xe0, 0x7a, 0x5b, 0xb4, 0x4c, 0x64, + 0x9e, 0xf9, 0x6c, 0x01, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x63, 0x9e, 0xa3, 0x00, 0x58, 0xa3, 0x88, + 0xe6, 0x8c, 0x93, 0x7e, 0x36, 0x31, 0x77, 0xc0, 0xa9, 0xc6, 0x73, 0x36, 0xfb, 0x05, 0x18, 0x55, + 0xc4, 0x7b, 
0xe9, 0x15, 0xc7, 0x75, 0x81, 0xed, 0x4b, 0x30, 0x95, 0xe8, 0xdb, 0x91, 0xd4, 0x92, + 0xbf, 0x6e, 0xc1, 0x14, 0xef, 0xcc, 0xb2, 0xb7, 0x2b, 0x4e, 0x83, 0x47, 0x70, 0xb2, 0x95, 0xc1, + 0x95, 0xc5, 0xf4, 0xf7, 0xcf, 0xc5, 0x95, 0x1a, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x97, 0xe9, + 0x8e, 0xa3, 0x5c, 0xd7, 0x69, 0x89, 0x98, 0x2b, 0xe3, 0x7c, 0xb7, 0xf1, 0x32, 0xac, 0xa0, 0xf6, + 0x1f, 0x58, 0x30, 0xc3, 0x7b, 0x7e, 0x93, 0xec, 0x29, 0xde, 0xf4, 0x83, 0xec, 0xbb, 0x48, 0xbf, + 0x5f, 0xca, 0x49, 0xbf, 0xaf, 0x7f, 0x5a, 0xb9, 0xf0, 0xd3, 0x7e, 0xc9, 0x02, 0xb1, 0x42, 0x8e, + 0x41, 0xd3, 0xf2, 0x23, 0xa6, 0xa6, 0x65, 0x36, 0x7f, 0x13, 0xe4, 0xa8, 0x58, 0xfe, 0xbd, 0x05, + 0xd3, 0x1c, 0x41, 0x8b, 0x62, 0xf7, 0x83, 0x9c, 0x87, 0x05, 0xf3, 0x8b, 0x32, 0xdd, 0x5a, 0x6f, + 0x92, 0xbd, 0x0d, 0xbf, 0xe6, 0x44, 0xdb, 0xd9, 0x1f, 0x65, 0x4c, 0xd6, 0x40, 0xe1, 0x64, 0x35, + 0xe5, 0x06, 0x32, 0x12, 0xad, 0xf6, 0x50, 0x00, 0x1f, 0x35, 0xd1, 0xaa, 0xfd, 0x47, 0x16, 0x20, + 0xde, 0x8c, 0x21, 0xb8, 0x51, 0x71, 0x88, 0x95, 0x66, 0x06, 0x0b, 0x54, 0x10, 0xac, 0x61, 0x3d, + 0x91, 0xe1, 0x49, 0xb8, 0xb2, 0x94, 0x7b, 0xbb, 0xb2, 0x1c, 0x61, 0x44, 0x7f, 0x69, 0x18, 0x92, + 0x0f, 0x56, 0xd1, 0x5d, 0x18, 0x6f, 0x38, 0x1d, 0xe7, 0xbe, 0xdb, 0x72, 0x23, 0x97, 0x84, 0x45, + 0x7e, 0x6e, 0x8b, 0x1a, 0x9e, 0x70, 0x3e, 0xd0, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x01, 0x74, 0x02, + 0x77, 0xd7, 0x6d, 0x91, 0x2d, 0xa6, 0x10, 0x62, 0x51, 0x9e, 0xb8, 0xd3, 0x9d, 0x2c, 0xc5, 0x1a, + 0x46, 0x46, 0x70, 0x95, 0xf2, 0x53, 0x0e, 0xae, 0x02, 0xc7, 0x16, 0x5c, 0x65, 0xe0, 0x48, 0xc1, + 0x55, 0x46, 0x8e, 0x1c, 0x5c, 0x65, 0xb0, 0xaf, 0xe0, 0x2a, 0x18, 0x4e, 0x4b, 0xd9, 0x93, 0xfe, + 0x5f, 0x71, 0x5b, 0x44, 0x5c, 0x38, 0x78, 0x68, 0xaa, 0xd9, 0x83, 0xfd, 0xea, 0x69, 0x9c, 0x89, + 0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x43, 0xc5, 0x69, 0xb5, 0xfc, 0x07, 0x6a, 0x52, 0x97, 0xc3, 0x86, + 0xd3, 0x8a, 0xc3, 0x32, 0x8e, 0x2c, 0x9c, 0x3b, 0xd8, 0xaf, 0x56, 0xe6, 0x73, 0x70, 0x70, 0x6e, + 0x6d, 0xf4, 0x0e, 0x8c, 0x76, 0x02, 0xbf, 0xb1, 0xa6, 0xbd, 0xaa, 0xbf, 0x40, 0x07, 0xb0, 0x26, + 0x0b, 0x0f, 0xf7, 0xab, 0x13, 0xea, 0x0f, 0x3b, 0xf0, 0xe3, 0x0a, 0x19, 0x71, 0x4b, 0xc6, 0x9e, + 0x76, 0xdc, 0x92, 0xf1, 0x27, 0x1c, 0xb7, 0xc4, 0xde, 0x81, 0x13, 0x75, 0x12, 0xb8, 0x4e, 0xcb, + 0x7d, 0x44, 0x65, 0x72, 0xc9, 0x03, 0x37, 0x60, 0x34, 0x48, 0x70, 0xfd, 0xbe, 0x92, 0x09, 0x68, + 0x7a, 0x19, 0xc9, 0xe5, 0x63, 0x42, 0xf6, 0xff, 0x6b, 0xc1, 0xb0, 0x78, 0x04, 0x7b, 0x0c, 0x92, + 0xe9, 0xbc, 0x61, 0x92, 0xa9, 0x66, 0x4f, 0x0a, 0xeb, 0x4c, 0xae, 0x31, 0x66, 0x35, 0x61, 0x8c, + 0x79, 0xb6, 0x88, 0x48, 0xb1, 0x19, 0xe6, 0x3f, 0x2b, 0xd3, 0x1b, 0x82, 0x11, 0x8e, 0xe1, 0xe9, + 0x0f, 0xc1, 0x3a, 0x0c, 0x87, 0x22, 0x1c, 0x40, 0x29, 0xff, 0x8d, 0x51, 0x72, 0x12, 0x63, 0x1f, + 0x48, 0x11, 0x00, 0x40, 0x12, 0xc9, 0x8c, 0x33, 0x50, 0x7e, 0x8a, 0x71, 0x06, 0x7a, 0x05, 0xac, + 0x18, 0x78, 0x12, 0x01, 0x2b, 0xec, 0xdf, 0x60, 0xa7, 0xb3, 0x5e, 0x7e, 0x0c, 0x82, 0xdb, 0x75, + 0xf3, 0x1c, 0xb7, 0x0b, 0x56, 0x96, 0xe8, 0x54, 0x8e, 0x00, 0xf7, 0x6b, 0x16, 0x9c, 0xcf, 0xf8, + 0x2a, 0x4d, 0x9a, 0x7b, 0x05, 0x46, 0x9c, 0x6e, 0xd3, 0x55, 0x7b, 0x59, 0xb3, 0x16, 0xcf, 0x8b, + 0x72, 0xac, 0x30, 0xd0, 0x22, 0xcc, 0x90, 0x54, 0x7c, 0x61, 0x1e, 0xb9, 0x8b, 0xbd, 0x9c, 0x4e, + 0x07, 0x17, 0x4e, 0xe3, 0xab, 0xa0, 0x77, 0xe5, 0xdc, 0xa0, 0x77, 0x7f, 0xcf, 0x82, 0x31, 0xf5, + 0x20, 0xfe, 0xa9, 0x8f, 0xf6, 0x7b, 0xe6, 0x68, 0x3f, 0x53, 0x30, 0xda, 0x39, 0xc3, 0xfc, 0x7b, + 0x25, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x7d, 0x48, 0x89, 0x8f, 0xff, 0xec, 0xe5, 0x2a, 0x8c, 0x39, + 0x9d, 0x8e, 0x04, 0x48, 0xff, 0x45, 
0x96, 0x1a, 0x26, 0x2e, 0xc6, 0x3a, 0x8e, 0x7a, 0x85, 0x53, + 0xce, 0x7d, 0x85, 0xd3, 0x04, 0x88, 0x9c, 0x60, 0x8b, 0x44, 0xb4, 0x4c, 0xb8, 0x5b, 0xe7, 0xf3, + 0x9b, 0x6e, 0xe4, 0xb6, 0xe6, 0x5c, 0x2f, 0x0a, 0xa3, 0x60, 0x6e, 0xd5, 0x8b, 0x6e, 0x07, 0xfc, + 0x9a, 0xaa, 0x85, 0x96, 0x54, 0xb4, 0xb0, 0x46, 0x57, 0x06, 0x7f, 0x61, 0x6d, 0x0c, 0x9a, 0x8e, + 0x30, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0b, 0xec, 0xf4, 0x61, 0x63, 0x7a, 0xb4, 0x90, 0x89, + 0x7f, 0x3c, 0xae, 0x66, 0x83, 0x99, 0x84, 0x97, 0xf4, 0xc0, 0x8c, 0xc5, 0xcc, 0x9e, 0x36, 0xac, + 0xbf, 0xb3, 0x8d, 0xa3, 0x37, 0xa2, 0xaf, 0xa5, 0x9c, 0x9b, 0x5e, 0xed, 0x71, 0x6a, 0x1c, 0xc1, + 0x9d, 0x89, 0xe5, 0x89, 0x64, 0x59, 0xf4, 0x56, 0x6b, 0x62, 0x5f, 0x68, 0x79, 0x22, 0x05, 0x00, + 0xc7, 0x38, 0x54, 0x60, 0x53, 0x7f, 0xc2, 0x0a, 0x8a, 0xd3, 0x09, 0x28, 0xec, 0x10, 0x6b, 0x18, + 0xe8, 0x8a, 0x50, 0x5a, 0x70, 0xdb, 0xc3, 0x33, 0x09, 0xa5, 0x85, 0x1c, 0x2e, 0x4d, 0xd3, 0x74, + 0x15, 0xc6, 0xc8, 0xc3, 0x88, 0x04, 0x9e, 0xd3, 0xa2, 0x2d, 0x0c, 0xc6, 0xc1, 0x91, 0x97, 0xe3, + 0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0a, 0xb9, 0x2e, 0x4f, 0x25, 0xb1, 0xe1, 0x3a, 0xd1, 0xcf, + 0xaa, 0x50, 0x04, 0x26, 0xf8, 0x90, 0x15, 0x71, 0xee, 0x24, 0x03, 0xb4, 0x24, 0x49, 0xa0, 0x77, + 0x61, 0xb2, 0xe5, 0x3b, 0xcd, 0x05, 0xa7, 0xe5, 0x78, 0x0d, 0x36, 0x3e, 0x23, 0x46, 0x94, 0xce, + 0xc9, 0x5b, 0x06, 0x14, 0x27, 0xb0, 0xa9, 0x80, 0xa8, 0x97, 0x88, 0xc4, 0x4b, 0x8e, 0xb7, 0x45, + 0xc2, 0xca, 0x28, 0xfb, 0x2a, 0x26, 0x20, 0xde, 0xca, 0xc1, 0xc1, 0xb9, 0xb5, 0xd1, 0x35, 0x18, + 0x97, 0x9f, 0xaf, 0xc5, 0x33, 0x8a, 0x1f, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x4e, 0xc9, + 0xff, 0x1b, 0x81, 0xb3, 0xb9, 0xe9, 0x36, 0x44, 0x90, 0x0f, 0xfe, 0x28, 0xfd, 0x4b, 0xf2, 0x05, + 0xec, 0x72, 0x16, 0xd2, 0xe1, 0x7e, 0xf5, 0x9c, 0x18, 0xb5, 0x4c, 0x38, 0xce, 0xa6, 0x8d, 0xd6, + 0xe0, 0x04, 0xf7, 0x81, 0x59, 0xdc, 0x26, 0x8d, 0x1d, 0xb9, 0xe1, 0x98, 0xd4, 0xa8, 0x3d, 0xfc, + 0xb9, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0x87, 0x50, 0xe9, 0x74, 0xef, 0xb7, 0xdc, 0x70, 0x7b, + 0xdd, 0x8f, 0x98, 0x0b, 0xd9, 0x7c, 0xb3, 0x19, 0x90, 0x90, 0xbf, 0x59, 0x66, 0x47, 0xaf, 0x8c, + 0x41, 0x55, 0xcb, 0xc1, 0xc3, 0xb9, 0x14, 0xd0, 0x23, 0x38, 0x95, 0x58, 0x08, 0x22, 0x98, 0xcc, + 0x64, 0x7e, 0x0a, 0xbb, 0x7a, 0x56, 0x05, 0x11, 0x97, 0x29, 0x0b, 0x84, 0xb3, 0x9b, 0x40, 0x6f, + 0x01, 0xb8, 0x9d, 0x15, 0xa7, 0xed, 0xb6, 0xe8, 0x75, 0xf4, 0x04, 0x5b, 0x23, 0xf4, 0x6a, 0x02, + 0xab, 0x35, 0x59, 0x4a, 0x79, 0xb3, 0xf8, 0xb7, 0x87, 0x35, 0x6c, 0x74, 0x0b, 0x26, 0xc5, 0xbf, + 0x3d, 0x31, 0xa5, 0x33, 0x2a, 0xdb, 0xf1, 0xa4, 0xac, 0xa1, 0xe6, 0x31, 0x51, 0x82, 0x13, 0x75, + 0xd1, 0x16, 0x9c, 0x97, 0xa9, 0x96, 0xf5, 0xf5, 0x29, 0xe7, 0x20, 0x64, 0x79, 0xe3, 0x46, 0xf8, + 0x9b, 0xa2, 0xf9, 0x22, 0x44, 0x5c, 0x4c, 0x87, 0x9e, 0xeb, 0xfa, 0x32, 0xe7, 0x2f, 0xd9, 0x4f, + 0xc5, 0xb1, 0x4e, 0x6f, 0x25, 0x81, 0x38, 0x8d, 0x8f, 0x7c, 0x38, 0xe5, 0x7a, 0x59, 0xab, 0xfa, + 0x34, 0x23, 0xf4, 0x45, 0xfe, 0x88, 0xbf, 0x78, 0x45, 0x67, 0xc2, 0x71, 0x36, 0x5d, 0xb4, 0x0a, + 0x27, 0x22, 0x5e, 0xb0, 0xe4, 0x86, 0x3c, 0x2d, 0x15, 0xbd, 0xf6, 0x9d, 0x61, 0xcd, 0x9d, 0xa1, + 0xab, 0x79, 0x23, 0x0d, 0xc6, 0x59, 0x75, 0x3e, 0x9e, 0x03, 0xe8, 0xef, 0x5b, 0xb4, 0xb6, 0x26, + 0xe8, 0xa3, 0x6f, 0xc2, 0xb8, 0x3e, 0x3e, 0x42, 0x68, 0xb9, 0x94, 0x2d, 0x07, 0x6b, 0xec, 0x85, + 0x5f, 0x13, 0x14, 0x0b, 0xd1, 0x61, 0xd8, 0xa0, 0x88, 0x1a, 0x19, 0xc1, 0x37, 0xae, 0xf4, 0x27, + 0x14, 0xf5, 0xef, 0xff, 0x48, 0x20, 0x7b, 0xe7, 0xa0, 0x5b, 0x30, 0xd2, 0x68, 0xb9, 0xc4, 0x8b, + 0x56, 0x6b, 0x45, 0x21, 0x68, 0x17, 0x05, 0x8e, 0xd8, 0x8a, 
0x22, 0x9b, 0x1c, 0x2f, 0xc3, 0x8a, + 0x82, 0x7d, 0x0d, 0xc6, 0xea, 0x2d, 0x42, 0x3a, 0xfc, 0x1d, 0x17, 0x7a, 0x89, 0x5d, 0x4c, 0x98, + 0x68, 0x69, 0x31, 0xd1, 0x52, 0xbf, 0x73, 0x30, 0xa1, 0x52, 0xc2, 0xed, 0xdf, 0x2e, 0x41, 0xb5, + 0x47, 0x52, 0xc3, 0x84, 0xbd, 0xcd, 0xea, 0xcb, 0xde, 0x36, 0x0f, 0x53, 0xf1, 0x3f, 0x5d, 0x95, + 0xa7, 0x9c, 0xa1, 0xef, 0x9a, 0x60, 0x9c, 0xc4, 0xef, 0xfb, 0x5d, 0x8b, 0x6e, 0xb2, 0x1b, 0xe8, + 0xf9, 0x32, 0xcb, 0x30, 0xd5, 0x0f, 0xf6, 0x7f, 0xf7, 0xce, 0x35, 0xbb, 0xda, 0xbf, 0x51, 0x82, + 0x53, 0x6a, 0x08, 0x7f, 0x78, 0x07, 0xee, 0x4e, 0x7a, 0xe0, 0x9e, 0x80, 0xd1, 0xda, 0xbe, 0x0d, + 0x43, 0x3c, 0x2e, 0x6e, 0x1f, 0x32, 0xff, 0x73, 0x66, 0x1e, 0x06, 0x25, 0x66, 0x1a, 0xb9, 0x18, + 0xfe, 0x92, 0x05, 0x53, 0x89, 0x07, 0x92, 0x08, 0x6b, 0xaf, 0xe8, 0x1f, 0x47, 0x2e, 0xcf, 0x92, + 0xf8, 0x2f, 0xc2, 0xc0, 0xb6, 0xaf, 0x9c, 0x94, 0x15, 0xc6, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8, + 0xff, 0xd2, 0x82, 0xc1, 0x0d, 0xc7, 0xf5, 0x22, 0x69, 0xfd, 0xb0, 0x72, 0xac, 0x1f, 0xfd, 0x7c, + 0x17, 0x7a, 0x13, 0x86, 0xc8, 0xe6, 0x26, 0x69, 0x44, 0x62, 0x56, 0x65, 0x94, 0x8f, 0xa1, 0x65, + 0x56, 0x4a, 0x85, 0x50, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa3, 0x91, 0xdb, 0x26, + 0xf3, 0xcd, 0xa6, 0xf0, 0x09, 0x78, 0x8c, 0xd0, 0x34, 0x1b, 0x92, 0x00, 0x8e, 0x69, 0xd9, 0xdf, + 0x2b, 0x01, 0xc4, 0x71, 0xf8, 0x7a, 0x7d, 0xe2, 0x42, 0xca, 0x5a, 0x7c, 0x29, 0xc3, 0x5a, 0x8c, + 0x62, 0x82, 0x19, 0xa6, 0x62, 0x35, 0x4c, 0xe5, 0xbe, 0x86, 0x69, 0xe0, 0x28, 0xc3, 0xb4, 0x08, + 0x33, 0x71, 0x1c, 0x41, 0x33, 0x8c, 0x2a, 0x3b, 0xbf, 0x37, 0x92, 0x40, 0x9c, 0xc6, 0xb7, 0x09, + 0x5c, 0x54, 0xe1, 0xd4, 0xc4, 0x59, 0xc8, 0x9e, 0x12, 0xe8, 0xd6, 0xf7, 0x1e, 0xe3, 0x14, 0x9b, + 0xc3, 0x4b, 0xb9, 0xe6, 0xf0, 0xbf, 0x69, 0xc1, 0xc9, 0x64, 0x3b, 0xec, 0xdd, 0xfd, 0x77, 0x2d, + 0x38, 0x15, 0xe7, 0xf4, 0x4a, 0xbb, 0x20, 0xbc, 0x51, 0x18, 0x22, 0x2e, 0xa7, 0xc7, 0x71, 0x38, + 0x99, 0xb5, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xff, 0x19, 0x80, 0x4a, 0x5e, 0x6c, 0x39, 0xf6, + 0xd2, 0xc8, 0x79, 0x58, 0xdf, 0x21, 0x0f, 0xc4, 0x7b, 0x8e, 0xf8, 0xa5, 0x11, 0x2f, 0xc6, 0x12, + 0x9e, 0x4c, 0xe3, 0x56, 0xea, 0x33, 0x8d, 0xdb, 0x36, 0xcc, 0x3c, 0xd8, 0x26, 0xde, 0x1d, 0x2f, + 0x74, 0x22, 0x37, 0xdc, 0x74, 0x99, 0x01, 0x9d, 0xaf, 0x9b, 0xb7, 0xe4, 0xab, 0x8b, 0x7b, 0x49, + 0x84, 0xc3, 0xfd, 0xea, 0x79, 0xa3, 0x20, 0xee, 0x32, 0x67, 0x24, 0x38, 0x4d, 0x34, 0x9d, 0x05, + 0x6f, 0xe0, 0x29, 0x67, 0xc1, 0x6b, 0xbb, 0xc2, 0xed, 0x46, 0x3e, 0x23, 0x61, 0xd7, 0xd6, 0x35, + 0x55, 0x8a, 0x35, 0x0c, 0xf4, 0x75, 0x40, 0x7a, 0x1a, 0x53, 0x23, 0xb4, 0xef, 0xab, 0x07, 0xfb, + 0x55, 0xb4, 0x9e, 0x82, 0x1e, 0xee, 0x57, 0x4f, 0xd0, 0xd2, 0x55, 0x8f, 0x5e, 0x7f, 0xe3, 0x78, + 0x88, 0x19, 0x84, 0xd0, 0x3d, 0x98, 0xa6, 0xa5, 0x6c, 0x47, 0xc9, 0xb8, 0xc1, 0xfc, 0xca, 0xfa, + 0xf2, 0xc1, 0x7e, 0x75, 0x7a, 0x3d, 0x01, 0xcb, 0x23, 0x9d, 0x22, 0x92, 0x91, 0x0c, 0x6f, 0xa4, + 0xdf, 0x64, 0x78, 0xf6, 0x77, 0x2d, 0x38, 0x4b, 0x0f, 0xb8, 0xe6, 0xad, 0x1c, 0x2b, 0xba, 0xd3, + 0x71, 0xb9, 0x9d, 0x46, 0x1c, 0x35, 0x4c, 0x57, 0x57, 0x5b, 0xe5, 0x56, 0x1a, 0x05, 0xa5, 0x1c, + 0x7e, 0xc7, 0xf5, 0x9a, 0x49, 0x0e, 0x7f, 0xd3, 0xf5, 0x9a, 0x98, 0x41, 0xd4, 0x91, 0x55, 0xce, + 0xcd, 0x43, 0xf0, 0x2b, 0x74, 0xaf, 0xd2, 0xbe, 0xfc, 0x40, 0xbb, 0x81, 0x5e, 0xd6, 0x6d, 0xaa, + 0xc2, 0x7d, 0x32, 0xd7, 0x9e, 0xfa, 0x1d, 0x0b, 0xc4, 0xeb, 0xf7, 0x3e, 0xce, 0xe4, 0xaf, 0xc2, + 0xf8, 0x6e, 0x3a, 0xc5, 0xf3, 0xc5, 0xfc, 0x70, 0x00, 0x22, 0xb1, 0xb3, 0x12, 0xd1, 0x8d, 0x74, + 0xce, 0x06, 0x2d, 0xbb, 0x09, 0x02, 0xba, 0x44, 0x98, 0x55, 0xa3, 0x77, 0x6f, 0x5e, 
0x03, 0x68, + 0x32, 0x5c, 0x96, 0xec, 0xac, 0x64, 0x4a, 0x5c, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x17, 0xca, + 0x30, 0x26, 0x53, 0x0a, 0x77, 0xbd, 0x7e, 0x74, 0x8f, 0xba, 0xe0, 0x54, 0xea, 0x29, 0x38, 0x7d, + 0x08, 0x33, 0x01, 0x69, 0x74, 0x83, 0xd0, 0xdd, 0x25, 0x12, 0x2c, 0x36, 0xc9, 0x1c, 0x4f, 0x83, + 0x91, 0x00, 0x1e, 0xb2, 0xd0, 0x5d, 0x89, 0x42, 0x66, 0x34, 0x4e, 0x13, 0x42, 0x57, 0x60, 0x94, + 0xa9, 0xde, 0x6b, 0xb1, 0x42, 0x58, 0x29, 0xbe, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xec, 0x72, 0xd0, + 0xbd, 0xaf, 0x65, 0xa2, 0x8b, 0x2f, 0x07, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x32, 0x4c, 0xf3, 0x7a, + 0x81, 0xdf, 0x71, 0xb6, 0xb8, 0x49, 0x70, 0x50, 0x85, 0xd7, 0x99, 0x5e, 0x4b, 0xc0, 0x0e, 0xf7, + 0xab, 0x27, 0x93, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf3, 0x8f, 0x37, 0x42, 0xcf, 0x8c, 0x94, + 0xc3, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0x27, 0x16, 0xcc, 0x68, 0x53, 0xd5, 0x77, 0x26, 0x12, + 0x63, 0x90, 0x4a, 0x7d, 0x0c, 0xd2, 0xd1, 0xa2, 0x3d, 0x64, 0xce, 0xf0, 0xc0, 0x13, 0x9a, 0x61, + 0xfb, 0x9b, 0x80, 0xd2, 0xf9, 0xaa, 0xd1, 0xfb, 0xdc, 0x91, 0xdf, 0x0d, 0x48, 0xb3, 0xc8, 0xe0, + 0xaf, 0x47, 0xce, 0x91, 0x2f, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x7f, 0x32, 0x00, 0xd3, 0xc9, + 0x58, 0x1d, 0xe8, 0x06, 0x0c, 0x71, 0x29, 0x5d, 0x90, 0x2f, 0xf0, 0x27, 0xd3, 0x22, 0x7c, 0xf0, + 0x2c, 0x41, 0x5c, 0xba, 0x17, 0xf5, 0xd1, 0x87, 0x30, 0xd6, 0xf4, 0x1f, 0x78, 0x0f, 0x9c, 0xa0, + 0x39, 0x5f, 0x5b, 0x15, 0x1c, 0x22, 0x53, 0x01, 0xb5, 0x14, 0xa3, 0xe9, 0x51, 0x43, 0x98, 0xef, + 0x44, 0x0c, 0xc2, 0x3a, 0x39, 0xb4, 0xc1, 0x12, 0x57, 0x6d, 0xba, 0x5b, 0x6b, 0x4e, 0xa7, 0xe8, + 0x55, 0xd7, 0xa2, 0x44, 0xd2, 0x28, 0x4f, 0x88, 0xec, 0x56, 0x1c, 0x80, 0x63, 0x42, 0xe8, 0x47, + 0xe1, 0x44, 0x98, 0x63, 0x12, 0xcb, 0x71, 0x38, 0x28, 0xb4, 0x12, 0x71, 0x65, 0x4a, 0x96, 0xf1, + 0x2c, 0xab, 0x19, 0xf4, 0x10, 0x90, 0x50, 0x3d, 0x6f, 0x04, 0xdd, 0x30, 0xe2, 0x29, 0x20, 0xc5, + 0xa5, 0xeb, 0x73, 0xd9, 0x7a, 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x0b, 0x9c, 0x9c, 0xc6, 0xc0, 0x19, + 0x6d, 0xa0, 0x6d, 0x98, 0xec, 0x18, 0xd9, 0x37, 0xd9, 0xde, 0xcc, 0x89, 0x2e, 0x9c, 0x97, 0xa7, + 0x93, 0x9f, 0xd2, 0x26, 0x14, 0x27, 0xe8, 0xda, 0xdf, 0x19, 0x80, 0x59, 0x99, 0x8a, 0x3e, 0xe3, + 0x9d, 0xcc, 0xb7, 0xad, 0xc4, 0x43, 0x99, 0xb7, 0xf2, 0x8f, 0x94, 0xa7, 0xf6, 0x5c, 0xe6, 0x27, + 0xd3, 0xcf, 0x65, 0xde, 0x39, 0x62, 0x37, 0x9e, 0xd8, 0xa3, 0x99, 0x1f, 0xda, 0x97, 0x2e, 0x07, + 0x27, 0xc1, 0x10, 0x02, 0x10, 0xe6, 0xf1, 0xef, 0x6b, 0xd2, 0x48, 0x95, 0xa3, 0x68, 0xb8, 0x21, + 0x70, 0x0c, 0xb1, 0x62, 0x5c, 0x46, 0xc9, 0x67, 0x1c, 0x5d, 0xd1, 0xa1, 0x34, 0x49, 0xbb, 0x13, + 0xed, 0x2d, 0xb9, 0x81, 0xe8, 0x71, 0x26, 0xcd, 0x65, 0x81, 0x93, 0xa6, 0x29, 0x21, 0x58, 0xd1, + 0x41, 0xbb, 0x30, 0xb3, 0xc5, 0x62, 0x4b, 0x69, 0x59, 0xe1, 0x05, 0x07, 0xca, 0xe4, 0x10, 0xd7, + 0x17, 0x97, 0xf3, 0x53, 0xc8, 0xf3, 0x6b, 0x66, 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa4, + 0xf3, 0x20, 0x5c, 0x6e, 0x39, 0x61, 0xe4, 0x36, 0x16, 0x5a, 0x7e, 0x63, 0xa7, 0x1e, 0xf9, 0x81, + 0xcc, 0x2a, 0x9a, 0x79, 0xcb, 0x9b, 0xbf, 0x57, 0x4f, 0xe1, 0x1b, 0xcd, 0xb3, 0xec, 0xb6, 0x59, + 0x58, 0x38, 0xb3, 0x2d, 0xb4, 0x0e, 0xc3, 0x5b, 0x6e, 0x84, 0x49, 0xc7, 0x17, 0x7c, 0x29, 0x93, + 0xe9, 0x5e, 0xe7, 0x28, 0x46, 0x4b, 0x2c, 0xf6, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xaf, 0x8e, + 0x9b, 0xa1, 0x7c, 0x55, 0x6f, 0xda, 0xcb, 0x2f, 0xf3, 0xc0, 0x79, 0x17, 0xca, 0xde, 0x66, 0x58, + 0x14, 0xf5, 0x67, 0x7d, 0xc5, 0xd0, 0xd4, 0x2d, 0x0c, 0xd3, 0x4b, 0xf8, 0xfa, 0x4a, 0x1d, 0xd3, + 0x8a, 0xec, 0x81, 0x6d, 0xd8, 0x08, 0x5d, 0x91, 0xbc, 0x2b, 0xf3, 0xbd, 0xf1, 0x6a, 0x7d, 0xb1, + 0xbe, 0x6a, 
0xd0, 0x60, 0xf1, 0x13, 0x59, 0x31, 0xe6, 0xd5, 0xd1, 0x5d, 0x18, 0xdd, 0xe2, 0x2c, + 0x76, 0x93, 0x87, 0xb5, 0xcd, 0x39, 0xf6, 0xae, 0x4b, 0x24, 0x83, 0x1e, 0x3b, 0x9c, 0x14, 0x08, + 0xc7, 0xa4, 0xd0, 0x77, 0x2c, 0x38, 0xd5, 0x49, 0xe8, 0x6a, 0xd9, 0xb3, 0x38, 0xe1, 0x10, 0x97, + 0xf9, 0xd4, 0xa0, 0x96, 0x55, 0xc1, 0x68, 0x90, 0x19, 0x7a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74, + 0xa0, 0x83, 0xfb, 0xcd, 0xa2, 0x7c, 0x4f, 0x89, 0x10, 0x48, 0x7c, 0xa0, 0xf1, 0xc2, 0x12, 0xa6, + 0x15, 0xd1, 0x06, 0xc0, 0x66, 0x8b, 0x88, 0xd8, 0x92, 0xc2, 0xfd, 0x2a, 0x53, 0xce, 0x58, 0x51, + 0x58, 0x82, 0x0e, 0xbb, 0xf3, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xc3, 0xf5, 0x9a, 0x24, + 0x60, 0x66, 0xb4, 0x9c, 0xa5, 0xb4, 0xc8, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, + 0x22, 0x9d, 0xed, 0xcd, 0xb0, 0x28, 0xb3, 0xc8, 0x22, 0xe9, 0x6c, 0x27, 0x16, 0x14, 0xa7, 0xc5, + 0xca, 0xb1, 0xa0, 0x40, 0xb7, 0xcc, 0x26, 0xdd, 0x40, 0x24, 0xa8, 0x4c, 0xe5, 0x6f, 0x99, 0x15, + 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x0d, 0x53, 0xae, 0x9a, 0x66, 0x34, 0x5f, + 0xee, 0x21, 0x57, 0x19, 0x74, 0x8b, 0x25, 0xab, 0xb7, 0xa0, 0xb4, 0xd9, 0x60, 0xe6, 0xb7, 0x1c, + 0xeb, 0xc4, 0xca, 0xa2, 0x41, 0x8d, 0x45, 0xea, 0x5f, 0x59, 0xc4, 0xa5, 0xcd, 0x06, 0x5d, 0xfa, + 0xce, 0xa3, 0x6e, 0x40, 0x56, 0xdc, 0x16, 0x11, 0xa1, 0x83, 0x33, 0x97, 0xfe, 0xbc, 0x44, 0x4a, + 0x2f, 0x7d, 0x05, 0xc2, 0x31, 0x29, 0x4a, 0x37, 0x96, 0xf6, 0x4e, 0xe4, 0xd3, 0x55, 0x42, 0x5d, + 0x9a, 0x6e, 0xa6, 0xbc, 0xb7, 0x03, 0x13, 0xbb, 0x61, 0x67, 0x9b, 0x48, 0xae, 0xc8, 0x0c, 0x83, + 0x39, 0x31, 0x31, 0xee, 0x0a, 0x44, 0x37, 0x88, 0xba, 0x4e, 0x2b, 0xc5, 0xc8, 0x99, 0x12, 0xe7, + 0xae, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0x42, 0xf8, 0x88, 0x07, 0xae, 0x63, 0x26, 0xc2, 0x9c, 0x85, + 0x90, 0x11, 0xdb, 0x8e, 0x2f, 0x04, 0x01, 0xc0, 0x92, 0x88, 0x1a, 0x6c, 0x76, 0x00, 0x9d, 0xee, + 0x31, 0xd8, 0xa9, 0xfe, 0xc6, 0x83, 0xcd, 0x0e, 0x9c, 0x98, 0x14, 0x3b, 0x68, 0x3a, 0xdb, 0x7e, + 0xe4, 0x7b, 0x89, 0x43, 0xee, 0x4c, 0xfe, 0x41, 0x53, 0xcb, 0xc0, 0x4f, 0x1f, 0x34, 0x59, 0x58, + 0x38, 0xb3, 0x2d, 0xfa, 0x71, 0x1d, 0x19, 0x83, 0x50, 0x64, 0x42, 0x79, 0x29, 0x27, 0x84, 0x67, + 0x3a, 0x50, 0x21, 0xff, 0x38, 0x05, 0xc2, 0x31, 0x29, 0xd4, 0xa4, 0x92, 0xae, 0x1e, 0xdb, 0x96, + 0x65, 0x74, 0xc9, 0x91, 0x0b, 0xb2, 0xa2, 0xe0, 0x4a, 0x29, 0x57, 0x87, 0xe0, 0x04, 0x4d, 0xe6, + 0x23, 0xc8, 0x1f, 0x15, 0xb2, 0x84, 0x2f, 0x39, 0x53, 0x9d, 0xf1, 0xee, 0x90, 0x4f, 0xb5, 0x00, + 0x60, 0x49, 0x84, 0x8e, 0x86, 0x78, 0x0a, 0xe7, 0x87, 0x2c, 0x6f, 0x52, 0x9e, 0x29, 0x3f, 0xcb, + 0x20, 0x25, 0x03, 0xcd, 0x0b, 0x10, 0x8e, 0x49, 0x51, 0x4e, 0x4e, 0x0f, 0xbc, 0x73, 0xf9, 0x9c, + 0x3c, 0x79, 0xdc, 0x31, 0x4e, 0x4e, 0x0f, 0xbb, 0xb2, 0x38, 0xea, 0x54, 0x5c, 0x74, 0x96, 0xf3, + 0x25, 0xa7, 0x5f, 0x2a, 0xb0, 0x7a, 0xba, 0x5f, 0x0a, 0x84, 0x63, 0x52, 0xec, 0x28, 0x66, 0x41, + 0xf0, 0x2e, 0x14, 0x1c, 0xc5, 0x14, 0x21, 0xe3, 0x28, 0xd6, 0x82, 0xe4, 0xd9, 0x7f, 0xb9, 0x04, + 0x17, 0x8a, 0xf7, 0x6d, 0x6c, 0xad, 0xab, 0xc5, 0xde, 0x51, 0x09, 0x6b, 0x1d, 0xd7, 0x1d, 0xc5, + 0x58, 0x7d, 0x87, 0x36, 0xbe, 0x0e, 0x33, 0xea, 0xe1, 0x63, 0xcb, 0x6d, 0xec, 0x69, 0x89, 0x5e, + 0x55, 0x10, 0xa0, 0x7a, 0x12, 0x01, 0xa7, 0xeb, 0xa0, 0x79, 0x98, 0x32, 0x0a, 0x57, 0x97, 0x84, + 0xa2, 0x21, 0xce, 0x56, 0x62, 0x82, 0x71, 0x12, 0xdf, 0xfe, 0x45, 0x0b, 0xce, 0xf0, 0x40, 0xbc, + 0xa4, 0x59, 0xf3, 0x9b, 0x52, 0xa3, 0x70, 0xa4, 0xc8, 0xbd, 0x9b, 0x30, 0xd5, 0x31, 0xab, 0xf6, + 0x08, 0x36, 0xae, 0xa3, 0xc6, 0x7d, 0x4d, 0x00, 0x70, 0x92, 0xa8, 0xfd, 0xf3, 0x25, 0x38, 0x5f, + 0xe8, 0xc9, 0x8f, 0x30, 0x9c, 0xde, 
0x6a, 0x87, 0xce, 0x62, 0x40, 0x9a, 0xc4, 0x8b, 0x5c, 0xa7, + 0x55, 0xef, 0x90, 0x86, 0x66, 0x6f, 0x65, 0x2e, 0xf1, 0xd7, 0xd7, 0xea, 0xf3, 0x69, 0x0c, 0x9c, + 0x53, 0x13, 0xad, 0x00, 0x4a, 0x43, 0xc4, 0x0c, 0xb3, 0xcb, 0x74, 0x9a, 0x1e, 0xce, 0xa8, 0x81, + 0xbe, 0x00, 0x13, 0xea, 0x85, 0x80, 0x36, 0xe3, 0xec, 0x80, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8, + 0x2a, 0x4f, 0x63, 0x25, 0x12, 0x9e, 0x09, 0xe3, 0xec, 0x94, 0xcc, 0x51, 0x25, 0x8a, 0xb1, 0x8e, + 0xb3, 0x70, 0xed, 0x77, 0xfe, 0xf0, 0xc2, 0x67, 0x7e, 0xf7, 0x0f, 0x2f, 0x7c, 0xe6, 0x0f, 0xfe, + 0xf0, 0xc2, 0x67, 0x7e, 0xfc, 0xe0, 0x82, 0xf5, 0x3b, 0x07, 0x17, 0xac, 0xdf, 0x3d, 0xb8, 0x60, + 0xfd, 0xc1, 0xc1, 0x05, 0xeb, 0x7f, 0x3b, 0xb8, 0x60, 0x7d, 0xef, 0x7f, 0xbf, 0xf0, 0x99, 0xaf, + 0xa2, 0x38, 0x16, 0xf6, 0x15, 0x3a, 0x3b, 0x57, 0x76, 0xaf, 0xfe, 0x87, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xba, 0xfb, 0xfc, 0xdd, 0x18, 0x2e, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -9346,6 +9549,22 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RestartPolicyRules) > 0 { + for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } if m.RestartPolicy != nil { i -= len(*m.RestartPolicy) copy(dAtA[i:], *m.RestartPolicy) @@ -9600,6 +9819,44 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerExtendedResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerExtendedResourceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerExtendedResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestName) + copy(dAtA[i:], m.RequestName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerImage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9712,6 +9969,81 @@ func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerRestartRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerRestartRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerRestartRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExitCodes != nil { + { + size, err := m.ExitCodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerRestartRuleOnExitCodes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerRestartRuleOnExitCodes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerRestartRuleOnExitCodes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Values[iNdEx])) + i-- + dAtA[i] = 0x10 + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -10681,6 +11013,18 @@ func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.FileKeyRef != nil { + { + size, err := m.FileKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.SecretKeyRef != nil { { size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) @@ -10790,6 +11134,22 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if len(m.RestartPolicyRules) > 0 { + for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } if m.RestartPolicy != nil { i -= len(*m.RestartPolicy) copy(dAtA[i:], *m.RestartPolicy) @@ -11426,6 +11786,54 @@ func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FileKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -15791,6 +16199,59 @@ func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodCertificateProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCertificateProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCertificateProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CertificateChainPath) + copy(dAtA[i:], m.CertificateChainPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChainPath))) + i-- + dAtA[i] = 0x32 + i -= len(m.KeyPath) + copy(dAtA[i:], m.KeyPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyPath))) + i-- + dAtA[i] = 0x2a + i -= len(m.CredentialBundlePath) + copy(dAtA[i:], m.CredentialBundlePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CredentialBundlePath))) + i-- + dAtA[i] = 0x22 + if m.MaxExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.KeyType) + copy(dAtA[i:], m.KeyType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyType))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16016,6 +16477,48 @@ func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodExtendedResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExtendedResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodExtendedResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ResourceClaimName) + copy(dAtA[i:], m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClaimName))) + i-- + dAtA[i] = 0x12 + if len(m.RequestMappings) > 0 { + for iNdEx := len(m.RequestMappings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequestMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *PodIP) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16597,6 +17100,15 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.HostnameOverride != nil { + i -= len(*m.HostnameOverride) + copy(dAtA[i:], *m.HostnameOverride) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HostnameOverride))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } if m.Resources != nil { { size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) @@ -17085,6 +17597,20 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExtendedResourceClaimStatus != nil { + { + size, err := m.ExtendedResourceClaimStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- dAtA[i] = 0x1 @@ -21108,6 +21634,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PodCertificate != nil { + { + size, err := m.PodCertificate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if m.ClusterTrustBundle != nil { { size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i]) @@ -22471,6 +23009,27 @@ func (m *Container) Size() (n int) { l = len(*m.RestartPolicy) n += 2 + l + sovGenerated(uint64(l)) } + if len(m.RestartPolicyRules) > 0 { + for _, e := range m.RestartPolicyRules { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ContainerExtendedResourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RequestName) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -22520,6 +23079,37 @@ func (m *ContainerResizePolicy) Size() (n int) { return n } +func (m *ContainerRestartRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExitCodes != nil { + l = m.ExitCodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerRestartRuleOnExitCodes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, e := range m.Values { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + func (m *ContainerState) Size() (n int) { if m == nil { return 0 @@ -22896,6 +23486,10 @@ func (m *EnvVarSource) Size() (n int) { l = m.SecretKeyRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.FileKeyRef != nil { + l = m.FileKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23007,6 +23601,12 @@ func (m *EphemeralContainerCommon) Size() (n int) { l = len(*m.RestartPolicy) n += 2 + l + sovGenerated(uint64(l)) } + if len(m.RestartPolicyRules) > 0 { + for _, e := range m.RestartPolicyRules { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23149,6 +23749,24 @@ func (m *FCVolumeSource) Size() (n int) { return n } +func (m *FileKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + func (m *FlexPersistentVolumeSource) Size() (n int) { if m == nil { return 0 @@ -24752,6 +25370,28 @@ func (m *PodAttachOptions) Size() (n int) { return n } +func (m *PodCertificateProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KeyType) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds)) + } + l = len(m.CredentialBundlePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KeyPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CertificateChainPath) + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + func (m *PodCondition) Size() (n int) { if m == nil { return 0 @@ -24837,6 +25477,23 @@ func (m *PodExecOptions) Size() (n int) { return n } +func (m *PodExtendedResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequestMappings) > 0 { + for _, e := range m.RequestMappings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodIP) Size() (n int) { if m == nil { return 0 @@ -25224,6 +25881,10 @@ func (m *PodSpec) Size() (n int) { l = m.Resources.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.HostnameOverride != nil { + l = len(*m.HostnameOverride) + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -25296,6 +25957,10 @@ func (m *PodStatus) Size() (n int) { } } n += 2 + sovGenerated(uint64(m.ObservedGeneration)) + if m.ExtendedResourceClaimStatus != nil { + l = m.ExtendedResourceClaimStatus.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -26751,6 +27416,10 @@ func (m *VolumeProjection) Size() (n int) { l = m.ClusterTrustBundle.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.PodCertificate != nil { + l = m.PodCertificate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -27426,6 +28095,11 @@ func (this *Container) String() string { repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," } repeatedStringForResizePolicy += "}" + repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{" + for _, f := range this.RestartPolicyRules { + repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRestartPolicyRules += "}" s := strings.Join([]string{`&Container{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -27451,6 +28125,19 @@ func (this *Container) String() string { `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, + `RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`, + `}`, + }, "") + return s +} +func (this *ContainerExtendedResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerExtendedResourceRequest{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `RequestName:` + fmt.Sprintf("%v", this.RequestName) + `,`, `}`, }, "") return s @@ -27491,6 +28178,28 @@ func (this *ContainerResizePolicy) String() string { }, "") return s } +func (this *ContainerRestartRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerRestartRule{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `ExitCodes:` + strings.Replace(this.ExitCodes.String(), "ContainerRestartRuleOnExitCodes", "ContainerRestartRuleOnExitCodes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerRestartRuleOnExitCodes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerRestartRuleOnExitCodes{`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return 
s +} func (this *ContainerState) String() string { if this == nil { return "nil" @@ -27777,6 +28486,7 @@ func (this *EnvVarSource) String() string { `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, `ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, `SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `FileKeyRef:` + strings.Replace(this.FileKeyRef.String(), "FileKeySelector", "FileKeySelector", 1) + `,`, `}`, }, "") return s @@ -27826,6 +28536,11 @@ func (this *EphemeralContainerCommon) String() string { repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," } repeatedStringForResizePolicy += "}" + repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{" + for _, f := range this.RestartPolicyRules { + repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRestartPolicyRules += "}" s := strings.Join([]string{`&EphemeralContainerCommon{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -27851,6 +28566,7 @@ func (this *EphemeralContainerCommon) String() string { `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, + `RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`, `}`, }, "") return s @@ -27951,6 +28667,19 @@ func (this *FCVolumeSource) String() string { }, "") return s } +func (this *FileKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileKeySelector{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *FlexPersistentVolumeSource) String() string { if this == nil { return "nil" @@ -29169,6 +29898,21 @@ func (this *PodAttachOptions) String() string { }, "") return s } +func (this *PodCertificateProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCertificateProjection{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `KeyType:` + fmt.Sprintf("%v", this.KeyType) + `,`, + `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`, + `CredentialBundlePath:` + fmt.Sprintf("%v", this.CredentialBundlePath) + `,`, + `KeyPath:` + fmt.Sprintf("%v", this.KeyPath) + `,`, + `CertificateChainPath:` + fmt.Sprintf("%v", this.CertificateChainPath) + `,`, + `}`, + }, "") + return s +} func (this *PodCondition) String() string { if this == nil { return "nil" @@ -29228,6 +29972,22 @@ func (this *PodExecOptions) String() string { }, "") return s } +func (this *PodExtendedResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequestMappings := "[]ContainerExtendedResourceRequest{" + for _, f := range this.RequestMappings { + repeatedStringForRequestMappings += strings.Replace(strings.Replace(f.String(), "ContainerExtendedResourceRequest", "ContainerExtendedResourceRequest", 
1), `&`, ``, 1) + "," + } + repeatedStringForRequestMappings += "}" + s := strings.Join([]string{`&PodExtendedResourceClaimStatus{`, + `RequestMappings:` + repeatedStringForRequestMappings + `,`, + `ResourceClaimName:` + fmt.Sprintf("%v", this.ResourceClaimName) + `,`, + `}`, + }, "") + return s +} func (this *PodIP) String() string { if this == nil { return "nil" @@ -29503,6 +30263,7 @@ func (this *PodSpec) String() string { `SchedulingGates:` + repeatedStringForSchedulingGates + `,`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, + `HostnameOverride:` + valueToStringGenerated(this.HostnameOverride) + `,`, `}`, }, "") return s @@ -29564,6 +30325,7 @@ func (this *PodStatus) String() string { `ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`, `HostIPs:` + repeatedStringForHostIPs + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ExtendedResourceClaimStatus:` + strings.Replace(this.ExtendedResourceClaimStatus.String(), "PodExtendedResourceClaimStatus", "PodExtendedResourceClaimStatus", 1) + `,`, `}`, }, "") return s @@ -30673,6 +31435,7 @@ func (this *VolumeProjection) String() string { `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, `ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`, + `PodCertificate:` + strings.Replace(this.PodCertificate.String(), "PodCertificateProjection", "PodCertificateProjection", 1) + `,`, `}`, }, "") return s @@ -36465,61 +37228,11 @@ func (m *Container) Unmarshal(dAtA []byte) error { s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) m.RestartPolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerImage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 25: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36529,43 +37242,26 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) 
<< shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) - } - m.SizeBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SizeBytes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{}) + if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -36587,7 +37283,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerPort) Unmarshal(dAtA []byte) error { +func (m *ContainerExtendedResourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36610,15 +37306,15 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerExtendedResourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerExtendedResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36646,49 +37342,11 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) - } - m.HostPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HostPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) - } - m.ContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ContainerPort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36716,11 +37374,11 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = 
Protocol(dAtA[iNdEx:postIndex]) + m.ResourceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequestName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36748,7 +37406,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostIP = string(dAtA[iNdEx:postIndex]) + m.RequestName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -36771,7 +37429,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { +func (m *ContainerImage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36794,15 +37452,15 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36830,13 +37488,13 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) } - var stringLen uint64 + m.SizeBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36846,24 +37504,585 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SizeBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResizePolicy) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerRestartRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerRestartRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerRestartRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = ContainerRestartRuleAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExitCodes == nil { + m.ExitCodes = &ContainerRestartRuleOnExitCodes{} + } + if err := m.ExitCodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerRestartRuleOnExitCodes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = ContainerRestartRuleOnExitCodesOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Values) == 0 { + m.Values = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -38790,13 +40009,196 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var msglen int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38806,31 +40208,14 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} - } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38858,11 +40243,11 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38891,7 +40276,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s + m.AppProtocol = &s iNdEx = postIndex default: iNdEx = preIndex @@ -38914,7 +40299,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointPort) Unmarshal(dAtA []byte) error { +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -38937,17 +40322,17 @@ func (m 
*EndpointPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38957,48 +40342,31 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39008,29 +40376,31 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39040,24 +40410,25 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.AppProtocol = &s + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -39080,7 +40451,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { +func (m *Endpoints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39103,15 +40474,15 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39138,48 +40509,13 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39206,8 +40542,8 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39232,7 +40568,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } return nil } -func (m *Endpoints) Unmarshal(dAtA []byte) error { +func (m *EndpointsList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39255,15 
+40591,15 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39290,13 +40626,13 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39323,8 +40659,8 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39349,7 +40685,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointsList) Unmarshal(dAtA []byte) error { +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39372,15 +40708,47 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39407,13 +40775,16 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39440,8 +40811,10 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39466,7 +40839,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { +func (m *EnvVar) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39489,15 +40862,15 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -39525,13 +40898,13 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39541,31 +40914,27 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} - } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39592,10 +40961,10 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { 
- m.SecretRef = &SecretEnvSource{} + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39620,7 +40989,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVar) Unmarshal(dAtA []byte) error { +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -39643,79 +41012,15 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39742,66 +41047,16 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVarSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39828,16 +41083,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39864,16 +41119,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39900,16 +41155,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FileKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -39936,10 +41191,10 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} + if m.FileKeyRef == nil { + m.FileKeyRef = &FileKeySelector{} } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FileKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -40700,46 +41955,116 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = 
append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) } var msglen int for shift := 
uint(0); ; shift += 7 { @@ -40766,16 +42091,16 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 22: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40785,31 +42110,28 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartupProbe == nil { - m.StartupProbe = &Probe{} - } - if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) + m.RestartPolicy = &s iNdEx = postIndex - case 23: + case 25: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40836,44 +42158,11 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) - if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{}) + if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) - m.RestartPolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42113,6 +43402,173 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *FileKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -55916,17 +57372,297 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodAntiAffinity: wiretype end group 
for non-group") + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) 
+ case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCertificateProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodCertificateProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -55936,31 +57672,29 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if 
msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SignerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -55970,81 +57704,29 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.KeyType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56054,17 +57736,17 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Stdin = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + 
m.MaxExpirationSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialBundlePath", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56074,37 +57756,29 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Stdout = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.Stderr = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int + m.CredentialBundlePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56114,15 +57788,27 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TTY = bool(v != 0) - case 5: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CertificateChainPath", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56150,7 +57836,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(dAtA[iNdEx:postIndex]) + m.CertificateChainPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -56607,15 +58293,210 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + m.TTY = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56643,11 +58524,11 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56675,8 +58556,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Value = &s + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -56699,7 +58579,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodExecOptions) Unmarshal(dAtA []byte) error { +func (m *PodExtendedResourceClaimStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -56722,97 +58602,17 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodExtendedResourceClaimStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodExtendedResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdout = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stderr = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TTY = bool(v != 0) - case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field RequestMappings", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56822,27 +58622,29 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(dAtA[iNdEx:postIndex]) + m.RequestMappings = append(m.RequestMappings, ContainerExtendedResourceRequest{}) + if err := m.RequestMappings[len(m.RequestMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56870,7 +58672,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + m.ResourceClaimName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -60088,6 +61890,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostnameOverride", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HostnameOverride = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -60687,6 +62522,42 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { break } } + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceClaimStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtendedResourceClaimStatus == nil { + m.ExtendedResourceClaimStatus = &PodExtendedResourceClaimStatus{} + } + if err := m.ExtendedResourceClaimStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -73489,6 +75360,42 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PodCertificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodCertificate == nil { + m.PodCertificate = &PodCertificateProjection{} + } + if err := m.PodCertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/e2e/vendor/k8s.io/api/core/v1/generated.proto b/e2e/vendor/k8s.io/api/core/v1/generated.proto index 9b48fb1c398..fb26953147d 100644 --- a/e2e/vendor/k8s.io/api/core/v1/generated.proto +++ b/e2e/vendor/k8s.io/api/core/v1/generated.proto @@ -737,8 +737,8 @@ message Container { repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple + // The keys defined within a source may consist of any printable ASCII characters except '='. + // When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -768,10 +768,10 @@ message Container { repeated ContainerResizePolicy resizePolicy = 23; // RestartPolicy defines the restart behavior of individual containers in a pod. - // This field may only be set for init containers, and the only allowed value is "Always". - // For non-init containers or when this field is not specified, + // This overrides the pod-level restart policy. When this field is not specified, // the restart behavior is defined by the Pod's restart policy and the container type. - // Setting the RestartPolicy as "Always" for the init container will have the following effect: + // Additionally, setting the RestartPolicy as "Always" for the init container will + // have the following effect: // this init container will be continually restarted on // exit until all regular containers have terminated. Once all regular // containers have completed, all init containers with restartPolicy "Always" @@ -786,6 +786,22 @@ message Container { // +optional optional string restartPolicy = 24; + // Represents a list of rules to be checked to determine if the + // container should be restarted on exit. The rules are evaluated in + // order. Once a rule matches a container exit condition, the remaining + // rules are ignored. If no rule matches the container exit condition, + // the Container-level restart policy determines the whether the container + // is restarted or not. Constraints on the rules: + // - At most 20 rules are allowed. + // - Rules can have the same action. + // - Identical rules are not forbidden in validations. + // When rules are specified, container MUST set RestartPolicy explicitly + // even it if matches the Pod's RestartPolicy. + // +featureGate=ContainerRestartRules + // +optional + // +listType=atomic + repeated ContainerRestartRule restartPolicyRules = 25; + // Pod volumes to mount into the container's filesystem. 
// Cannot be updated. // +optional @@ -888,6 +904,19 @@ message Container { optional bool tty = 18; } +// ContainerExtendedResourceRequest has the mapping of container name, +// extended resource name to the device request name. +message ContainerExtendedResourceRequest { + // The name of the container requesting resources. + optional string containerName = 1; + + // The name of the extended resource in that container which gets backed by DRA. + optional string resourceName = 2; + + // The name of the request in the special ResourceClaim which corresponds to the extended resource. + optional string requestName = 3; +} + // Describe a container image message ContainerImage { // Names by which this image is known. @@ -942,6 +971,39 @@ message ContainerResizePolicy { optional string restartPolicy = 2; } +// ContainerRestartRule describes how a container exit is handled. +message ContainerRestartRule { + // Specifies the action taken on a container exit if the requirements + // are satisfied. The only possible value is "Restart" to restart the + // container. + // +required + optional string action = 1; + + // Represents the exit codes to check on container exits. + // +optional + // +oneOf=when + optional ContainerRestartRuleOnExitCodes exitCodes = 2; +} + +// ContainerRestartRuleOnExitCodes describes the condition +// for handling an exited container based on its exit codes. +message ContainerRestartRuleOnExitCodes { + // Represents the relationship between the container exit code(s) and the + // specified values. Possible values are: + // - In: the requirement is satisfied if the container exit code is in the + // set of specified values. + // - NotIn: the requirement is satisfied if the container exit code is + // not in the set of specified values. + // +required + optional string operator = 1; + + // Specifies the set of values to check for container exit codes. + // At most 255 elements are allowed. + // +optional + // +listType=set + repeated int32 values = 2; +} + // ContainerState holds a possible state of container. // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. @@ -1344,7 +1406,8 @@ message EndpointsList { // EnvFromSource represents the source of a set of ConfigMaps or Secrets message EnvFromSource { - // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER. + // Optional text to prepend to the name of each environment variable. + // May consist of any printable ASCII characters except '='. // +optional optional string prefix = 1; @@ -1359,7 +1422,8 @@ message EnvFromSource { // EnvVar represents an environment variable present in a Container. message EnvVar { - // Name of the environment variable. Must be a C_IDENTIFIER. + // Name of the environment variable. + // May consist of any printable ASCII characters except '='. optional string name = 1; // Variable references $(VAR_NAME) are expanded @@ -1398,6 +1462,13 @@ message EnvVarSource { // Selects a key of a secret in the pod's namespace // +optional optional SecretKeySelector secretKeyRef = 4; + + // FileKeyRef selects a key of the env file. + // Requires the EnvFiles feature gate to be enabled. 
+ // + // +featureGate=EnvFiles + // +optional + optional FileKeySelector fileKeyRef = 5; } // An EphemeralContainer is a temporary container that you may add to an existing Pod for @@ -1479,8 +1550,8 @@ message EphemeralContainerCommon { repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple + // The keys defined within a source may consist of any printable ASCII characters except '='. + // When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -1510,12 +1581,19 @@ message EphemeralContainerCommon { // Restart policy for the container to manage the restart behavior of each // container within a pod. - // This may only be set for init containers. You cannot set this field on - // ephemeral containers. + // You cannot set this field on ephemeral containers. // +featureGate=SidecarContainers // +optional optional string restartPolicy = 24; + // Represents a list of rules to be checked to determine if the + // container should be restarted on exit. You cannot set this field on + // ephemeral containers. + // +featureGate=ContainerRestartRules + // +optional + // +listType=atomic + repeated ContainerRestartRule restartPolicyRules = 25; + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -1776,6 +1854,36 @@ message FCVolumeSource { repeated string wwids = 5; } +// FileKeySelector selects a key of the env file. +// +structType=atomic +message FileKeySelector { + // The name of the volume mount containing the env file. + // +required + optional string volumeName = 1; + + // The path within the volume from which to select the file. + // Must be relative and may not contain the '..' path or start with '..'. + // +required + optional string path = 2; + + // The key within the env file. An invalid key will prevent the pod from starting. + // The keys defined within a source may consist of any printable ASCII characters except '='. + // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + // +required + optional string key = 3; + + // Specify whether the file or its key must be defined. If the file or key + // does not exist, then the env var is not published. + // If optional is set to true and the specified key does not exist, + // the environment variable will not be set in the Pod's containers. + // + // If optional is set to false and the specified key does not exist, + // an error will be returned during Pod creation. + // +optional + // +default=false + optional bool optional = 4; +} + // FlexPersistentVolumeSource represents a generic persistent volume resource that is // provisioned/attached using an exec based plugin. message FlexPersistentVolumeSource { @@ -1949,7 +2057,6 @@ message GlusterfsPersistentVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // endpoints is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // path is the Glusterfs volume path. 
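An aside on the FileKeySelector added above: it lets an environment variable be sourced from a key inside an env file that a volume projects into the pod. A minimal sketch against the vendored k8s.io/api/core/v1 types from this hunk, assuming the EnvFiles feature gate is enabled; the volume name, path, and key are hypothetical:

```go
package example

import corev1 "k8s.io/api/core/v1"

// envFromFileKey builds an EnvVar whose value comes from a key in an env
// file, using the new FileKeyRef source on EnvVarSource.
func envFromFileKey() corev1.EnvVar {
	optional := false // false: a missing file or key is an error at pod creation
	return corev1.EnvVar{
		// Per the relaxed rules above, names may be any printable ASCII
		// characters except '='.
		Name: "DATABASE_URL",
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{
				VolumeName: "config",  // hypothetical volume mount name
				Path:       "app.env", // relative path inside the volume
				Key:        "database_url",
				Optional:   &optional,
			},
		},
	}
}
```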
@@ -3160,15 +3267,13 @@ message PersistentVolumeClaimSpec { // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. // If specified, the CSI driver will create or update the volume with the attributes defined // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - // will be set by the persistentvolume controller if it exists. + // it can be changed after the claim is created. An empty string or nil value indicates that no + // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + // this field can be reset to its previous value (including nil) to cancel the modification. // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource // exists. // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 9; @@ -3267,14 +3372,12 @@ message PersistentVolumeClaimStatus { // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string currentVolumeAttributesClassName = 8; // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional ModifyVolumeStatus modifyVolumeStatus = 9; @@ -3515,7 +3618,6 @@ message PersistentVolumeSpec { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 10; @@ -3684,8 +3786,8 @@ message PodAntiAffinity { // most preferred is the one with the greatest sum of weights, i.e. // for each node that meets all of the scheduling requirements (resource // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // compute a sum by iterating through the elements of this field and subtracting + // "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. 
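Since the volumeAttributesClassName semantics above are easy to misread: the field stays mutable after the claim is created, and a stuck (Infeasible) modification can be cancelled by resetting it to its previous value, including nil. A sketch with the vendored types; the class name "gold" and the claim size are hypothetical:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// claimWithAttributesClass requests a hypothetical "gold" class; updating
// the claim later with a different class (or back to nil while Infeasible)
// follows the rules documented above.
func claimWithAttributesClass() *corev1.PersistentVolumeClaim {
	class := "gold"
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
			VolumeAttributesClassName: &class,
		},
	}
}
```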
   // +optional
   // +listType=atomic
@@ -3725,6 +3827,79 @@ message PodAttachOptions {
   optional string container = 5;
 }
 
+// PodCertificateProjection provides a private key and X.509 certificate in the
+// pod filesystem.
+message PodCertificateProjection {
+  // Kubelet's generated CSRs will be addressed to this signer.
+  //
+  // +required
+  optional string signerName = 1;
+
+  // The type of keypair Kubelet will generate for the pod.
+  //
+  // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
+  // "ECDSAP521", and "ED25519".
+  //
+  // +required
+  optional string keyType = 2;
+
+  // maxExpirationSeconds is the maximum lifetime permitted for the
+  // certificate.
+  //
+  // Kubelet copies this value verbatim into the PodCertificateRequests it
+  // generates for this projection.
+  //
+  // If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
+  // will reject values shorter than 3600 (1 hour). The maximum allowable
+  // value is 7862400 (91 days).
+  //
+  // The signer implementation is then free to issue a certificate with any
+  // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+  // seconds (1 hour). This constraint is enforced by kube-apiserver.
+  // `kubernetes.io` signers will never issue certificates with a lifetime
+  // longer than 24 hours.
+  //
+  // +optional
+  optional int32 maxExpirationSeconds = 3;
+
+  // Write the credential bundle at this path in the projected volume.
+  //
+  // The credential bundle is a single file that contains multiple PEM blocks.
+  // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
+  // key.
+  //
+  // The remaining blocks are CERTIFICATE blocks, containing the issued
+  // certificate chain from the signer (leaf and any intermediates).
+  //
+  // Using credentialBundlePath lets your Pod's application code make a single
+  // atomic read that retrieves a consistent key and certificate chain. If you
+  // project them to separate files, your application code will need to
+  // additionally check that the leaf certificate was issued to the key.
+  //
+  // +optional
+  optional string credentialBundlePath = 4;
+
+  // Write the key at this path in the projected volume.
+  //
+  // Most applications should use credentialBundlePath. When using keyPath
+  // and certificateChainPath, your application needs to check that the key
+  // and leaf certificate are consistent, because it is possible to read the
+  // files mid-rotation.
+  //
+  // +optional
+  optional string keyPath = 5;
+
+  // Write the certificate chain at this path in the projected volume.
+  //
+  // Most applications should use credentialBundlePath. When using keyPath
+  // and certificateChainPath, your application needs to check that the key
+  // and leaf certificate are consistent, because it is possible to read the
+  // files mid-rotation.
+  //
+  // +optional
+  optional string certificateChainPath = 6;
+}
+
 // PodCondition contains details for the current condition of this pod.
 message PodCondition {
   // Type is the type of the condition.
@@ -3829,6 +4004,20 @@ message PodExecOptions {
   repeated string command = 6;
 }
 
+// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
+// resource requests backed by DRA. It stores the generated name for
+// the corresponding special ResourceClaim created by the scheduler.
+message PodExtendedResourceClaimStatus {
+  // RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request
+  // in the generated ResourceClaim.
+ // +listType=atomic + repeated ContainerExtendedResourceRequest requestMappings = 1; + + // ResourceClaimName is the name of the ResourceClaim that was + // generated for the Pod in the namespace of the Pod. + optional string resourceClaimName = 2; +} + // PodIP represents a single IP address allocated to the pod. message PodIP { // IP is the IP address assigned to the pod @@ -4269,7 +4458,9 @@ message PodSpec { optional string nodeName = 10; // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. + // When using HostNetwork you should specify ports so the scheduler is aware. + // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, + // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. // Default to false. // +k8s:conversion-gen=false // +optional @@ -4434,6 +4625,7 @@ message PodSpec { // - spec.hostPID // - spec.hostIPC // - spec.hostUsers + // - spec.resources // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile @@ -4504,7 +4696,7 @@ message PodSpec { // Resources is the total amount of CPU and Memory resources required by all // containers in the pod. It supports specifying Requests and Limits for - // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported. // // This field enables fine-grained control over resource allocation for the // entire pod, allowing resource sharing among containers in a pod. @@ -4516,6 +4708,21 @@ message PodSpec { // +featureGate=PodLevelResources // +optional optional ResourceRequirements resources = 40; + + // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. + // This field only specifies the pod's hostname and does not affect its DNS records. + // When this field is set to a non-empty string: + // - It takes precedence over the values set in `hostname` and `subdomain`. + // - The Pod's hostname will be set to this value. + // - `setHostnameAsFQDN` must be nil or set to false. + // - `hostNetwork` must be set to false. + // + // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. + // Requires the HostnameOverride feature gate to be enabled. + // + // +featureGate=HostnameOverride + // +optional + optional string hostnameOverride = 41; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -4674,6 +4881,11 @@ message PodStatus { // +featureGate=DynamicResourceAllocation // +optional repeated PodResourceClaimStatus resourceClaimStatuses = 15; + + // Status of extended resource claim backed by DRA. + // +featureGate=DRAExtendedResource + // +optional + optional PodExtendedResourceClaimStatus extendedResourceClaimStatus = 18; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -5298,7 +5510,7 @@ message ResourceRequirements { // Claims lists the names of resources, defined in spec.resourceClaims, // that are used by this container. // - // This is an alpha field and requires enabling the + // This field depends on the // DynamicResourceAllocation feature gate. // // This field is immutable. It can only be set for containers. 
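The hostnameOverride field above carries several cross-field constraints that are easier to see in one place. A sketch, hedged on the HostnameOverride feature gate being enabled; the names and image are hypothetical:

```go
package example

import corev1 "k8s.io/api/core/v1"

// specWithHostnameOverride sets the pod's hostname directly. The value must
// be a DNS-1123 subdomain of at most 64 characters; it takes precedence over
// hostname and subdomain, and requires hostNetwork=false and
// setHostnameAsFQDN unset or false.
func specWithHostnameOverride() corev1.PodSpec {
	hostname := "db-0.legacy-app"
	return corev1.PodSpec{
		HostNetwork:      false,
		HostnameOverride: &hostname,
		Containers: []corev1.Container{
			{Name: "app", Image: "registry.example/app:latest"},
		},
	}
}
```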
@@ -6301,7 +6513,6 @@ message Taint { optional string effect = 3; // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; } @@ -6682,6 +6893,44 @@ message VolumeProjection { // +featureGate=ClusterTrustBundleProjection // +optional optional ClusterTrustBundleProjection clusterTrustBundle = 5; + + // Projects an auto-rotating credential bundle (private key and certificate + // chain) that the pod can use either as a TLS client or server. + // + // Kubelet generates a private key and uses it to send a + // PodCertificateRequest to the named signer. Once the signer approves the + // request and issues a certificate chain, Kubelet writes the key and + // certificate chain to the pod filesystem. The pod does not start until + // certificates have been issued for each podCertificate projected volume + // source in its spec. + // + // Kubelet will begin trying to rotate the certificate at the time indicated + // by the signer using the PodCertificateRequest.Status.BeginRefreshAt + // timestamp. + // + // Kubelet can write a single file, indicated by the credentialBundlePath + // field, or separate files, indicated by the keyPath and + // certificateChainPath fields. + // + // The credential bundle is a single file in PEM format. The first PEM + // entry is the private key (in PKCS#8 format), and the remaining PEM + // entries are the certificate chain issued by the signer (typically, + // signers will return their certificate chain in leaf-to-root order). + // + // Prefer using the credential bundle format, since your application code + // can read it atomically. If you use keyPath and certificateChainPath, + // your application must make two separate file reads. If these coincide + // with a certificate rotation, it is possible that the private key and leaf + // certificate you read may not correspond to each other. Your application + // will need to check for this condition, and re-read until they are + // consistent. + // + // The named signer controls chooses the format of the certificate it + // issues; consult the signer implementation's documentation to learn how to + // use the certificates it issues. + // + // +featureGate=PodCertificateProjection +optional + optional PodCertificateProjection podCertificate = 6; } // VolumeResourceRequirements describes the storage resource requirements for a volume. @@ -6753,13 +7002,12 @@ message VolumeSource { // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://examples.k8s.io/volumes/iscsi/README.md + // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi // +optional optional ISCSIVolumeSource iscsi = 8; // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -6771,7 +7019,6 @@ message VolumeSource { // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
- // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; diff --git a/e2e/vendor/k8s.io/api/core/v1/types.go b/e2e/vendor/k8s.io/api/core/v1/types.go index f7641e485aa..08b6d351cc6 100644 --- a/e2e/vendor/k8s.io/api/core/v1/types.go +++ b/e2e/vendor/k8s.io/api/core/v1/types.go @@ -91,12 +91,11 @@ type VolumeSource struct { NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://examples.k8s.io/volumes/iscsi/README.md + // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // persistentVolumeClaimVolumeSource represents a reference to a @@ -106,7 +105,6 @@ type VolumeSource struct { PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // flexVolume represents a generic volume resource that is @@ -437,7 +435,6 @@ type PersistentVolumeSpec struct { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` @@ -616,15 +613,13 @@ type PersistentVolumeClaimSpec struct { // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. // If specified, the CSI driver will create or update the volume with the attributes defined // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - // will be set by the persistentvolume controller if it exists. + // it can be changed after the claim is created. An empty string or nil value indicates that no + // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + // this field can be reset to its previous value (including nil) to cancel the modification. 
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource // exists. // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` @@ -851,13 +846,11 @@ type PersistentVolumeClaimStatus struct { AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` @@ -972,7 +965,6 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // endpoints is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // path is the Glusterfs volume path. @@ -1993,6 +1985,79 @@ type ClusterTrustBundleProjection struct { Path string `json:"path" protobuf:"bytes,4,rep,name=path"` } +// PodCertificateProjection provides a private key and X.509 certificate in the +// pod filesystem. +type PodCertificateProjection struct { + // Kubelet's generated CSRs will be addressed to this signer. + // + // +required + SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,rep,name=signerName"` + + // The type of keypair Kubelet will generate for the pod. + // + // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + // "ECDSAP521", and "ED25519". + // + // +required + KeyType string `json:"keyType,omitempty" protobuf:"bytes,2,rep,name=keyType"` + + // maxExpirationSeconds is the maximum lifetime permitted for the + // certificate. + // + // Kubelet copies this value verbatim into the PodCertificateRequests it + // generates for this projection. + // + // If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + // will reject values shorter than 3600 (1 hour). The maximum allowable + // value is 7862400 (91 days). + // + // The signer implementation is then free to issue a certificate with any + // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + // seconds (1 hour). This constraint is enforced by kube-apiserver. 
+ // `kubernetes.io` signers will never issue certificates with a lifetime + // longer than 24 hours. + // + // +optional + MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,3,opt,name=maxExpirationSeconds"` + + // Write the credential bundle at this path in the projected volume. + // + // The credential bundle is a single file that contains multiple PEM blocks. + // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + // key. + // + // The remaining blocks are CERTIFICATE blocks, containing the issued + // certificate chain from the signer (leaf and any intermediates). + // + // Using credentialBundlePath lets your Pod's application code make a single + // atomic read that retrieves a consistent key and certificate chain. If you + // project them to separate files, your application code will need to + // additionally check that the leaf certificate was issued to the key. + // + // +optional + CredentialBundlePath string `json:"credentialBundlePath,omitempty" protobuf:"bytes,4,rep,name=credentialBundlePath"` + + // Write the key at this path in the projected volume. + // + // Most applications should use credentialBundlePath. When using keyPath + // and certificateChainPath, your application needs to check that the key + // and leaf certificate are consistent, because it is possible to read the + // files mid-rotation. + // + // +optional + KeyPath string `json:"keyPath,omitempty" protobuf:"bytes,5,rep,name=keyPath"` + + // Write the certificate chain at this path in the projected volume. + // + // Most applications should use credentialBundlePath. When using keyPath + // and certificateChainPath, your application needs to check that the key + // and leaf certificate are consistent, because it is possible to read the + // files mid-rotation. + // + // +optional + CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"` +} + // Represents a projected volume source type ProjectedVolumeSource struct { // sources is the list of volume projections. Each entry in this list @@ -2043,6 +2108,44 @@ type VolumeProjection struct { // +featureGate=ClusterTrustBundleProjection // +optional ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"` + + // Projects an auto-rotating credential bundle (private key and certificate + // chain) that the pod can use either as a TLS client or server. + // + // Kubelet generates a private key and uses it to send a + // PodCertificateRequest to the named signer. Once the signer approves the + // request and issues a certificate chain, Kubelet writes the key and + // certificate chain to the pod filesystem. The pod does not start until + // certificates have been issued for each podCertificate projected volume + // source in its spec. + // + // Kubelet will begin trying to rotate the certificate at the time indicated + // by the signer using the PodCertificateRequest.Status.BeginRefreshAt + // timestamp. + // + // Kubelet can write a single file, indicated by the credentialBundlePath + // field, or separate files, indicated by the keyPath and + // certificateChainPath fields. + // + // The credential bundle is a single file in PEM format. The first PEM + // entry is the private key (in PKCS#8 format), and the remaining PEM + // entries are the certificate chain issued by the signer (typically, + // signers will return their certificate chain in leaf-to-root order). 
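To make the credential-bundle guidance above concrete, here is a sketch of a projected volume using the new podCertificate source, preferring the single-file bundle so the application can read key and chain atomically. The signer name is hypothetical; keyType must be one of the values listed earlier, and the PodCertificateProjection feature gate is assumed:

```go
package example

import corev1 "k8s.io/api/core/v1"

// podCertificateVolume projects an auto-rotating key and certificate chain
// into the pod as a single PEM credential bundle.
func podCertificateVolume() corev1.Volume {
	maxExp := int32(86400) // 24h; kube-apiserver rejects values below 3600
	return corev1.Volume{
		Name: "tls",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					PodCertificate: &corev1.PodCertificateProjection{
						SignerName:           "example.com/internal-ca", // hypothetical signer
						KeyType:              "ECDSAP256",
						MaxExpirationSeconds: &maxExp,
						// One atomic file: PKCS#8 key first, then the chain.
						CredentialBundlePath: "credentials.pem",
					},
				}},
			},
		},
	}
}
```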
+ // + // Prefer using the credential bundle format, since your application code + // can read it atomically. If you use keyPath and certificateChainPath, + // your application must make two separate file reads. If these coincide + // with a certificate rotation, it is possible that the private key and leaf + // certificate you read may not correspond to each other. Your application + // will need to check for this condition, and re-read until they are + // consistent. + // + // The named signer controls chooses the format of the certificate it + // issues; consult the signer implementation's documentation to learn how to + // use the certificates it issues. + // + // +featureGate=PodCertificateProjection +optional + PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"` } const ( @@ -2351,7 +2454,8 @@ type VolumeDevice struct { // EnvVar represents an environment variable present in a Container. type EnvVar struct { - // Name of the environment variable. Must be a C_IDENTIFIER. + // Name of the environment variable. + // May consist of any printable ASCII characters except '='. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Optional: no more than one of the following may be specified. @@ -2388,6 +2492,39 @@ type EnvVarSource struct { // Selects a key of a secret in the pod's namespace // +optional SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` + // FileKeyRef selects a key of the env file. + // Requires the EnvFiles feature gate to be enabled. + // + // +featureGate=EnvFiles + // +optional + FileKeyRef *FileKeySelector `json:"fileKeyRef,omitempty" protobuf:"bytes,5,opt,name=fileKeyRef"` +} + +// FileKeySelector selects a key of the env file. +// +structType=atomic +type FileKeySelector struct { + // The name of the volume mount containing the env file. + // +required + VolumeName string `json:"volumeName" protobuf:"bytes,1,opt,name=volumeName"` + // The path within the volume from which to select the file. + // Must be relative and may not contain the '..' path or start with '..'. + // +required + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + // The key within the env file. An invalid key will prevent the pod from starting. + // The keys defined within a source may consist of any printable ASCII characters except '='. + // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + // +required + Key string `json:"key" protobuf:"bytes,3,opt,name=key"` + // Specify whether the file or its key must be defined. If the file or key + // does not exist, then the env var is not published. + // If optional is set to true and the specified key does not exist, + // the environment variable will not be set in the Pod's containers. + // + // If optional is set to false and the specified key does not exist, + // an error will be returned during Pod creation. + // +optional + // +default=false + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } // ObjectFieldSelector selects an APIVersioned field of an object. @@ -2439,7 +2576,8 @@ type SecretKeySelector struct { // EnvFromSource represents the source of a set of ConfigMaps or Secrets type EnvFromSource struct { - // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER. + // Optional text to prepend to the name of each environment variable. + // May consist of any printable ASCII characters except '='. 
// +optional Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` // The ConfigMap to select from @@ -2697,7 +2835,7 @@ type ResourceRequirements struct { // Claims lists the names of resources, defined in spec.resourceClaims, // that are used by this container. // -	// This is an alpha field and requires enabling the +	// This field depends on the // DynamicResourceAllocation feature gate. // // This field is immutable. It can only be set for containers. @@ -2805,8 +2943,8 @@ type Container struct { // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. -	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys -	// will be reported as an event when the container is starting. When a key exists in multiple +	// The keys defined within a source may consist of any printable ASCII characters except '='. +	// When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -2832,10 +2970,10 @@ type Container struct { // +listType=atomic ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // RestartPolicy defines the restart behavior of individual containers in a pod. -	// This field may only be set for init containers, and the only allowed value is "Always". -	// For non-init containers or when this field is not specified, +	// This overrides the pod-level restart policy. When this field is not specified, // the restart behavior is defined by the Pod's restart policy and the container type. -	// Setting the RestartPolicy as "Always" for the init container will have the following effect: +	// Additionally, setting the RestartPolicy as "Always" for the init container will +	// have the following effect: // this init container will be continually restarted on // exit until all regular containers have terminated. Once all regular // containers have completed, all init containers with restartPolicy "Always" @@ -2849,6 +2987,21 @@ type Container struct { // +featureGate=SidecarContainers // +optional RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` +	// Represents a list of rules to be checked to determine if the +	// container should be restarted on exit. The rules are evaluated in +	// order. Once a rule matches a container exit condition, the remaining +	// rules are ignored. If no rule matches the container exit condition, +	// the Container-level restart policy determines whether the container +	// is restarted or not. Constraints on the rules: +	// - At most 20 rules are allowed. +	// - Rules can have the same action. +	// - Identical rules are not forbidden in validations. +	// When rules are specified, the container MUST set RestartPolicy explicitly +	// even if it matches the Pod's RestartPolicy. +	// +featureGate=ContainerRestartRules +	// +optional +	// +listType=atomic +	RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"` // Pod volumes to mount into the container's filesystem. // Cannot be updated.
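To make the RestartPolicyRules semantics concrete, here is a minimal sketch of a container that is normally never restarted but is retried for one specific exit code, using the rule types introduced in the next hunk. The container name, image, and exit code 42 are illustrative assumptions.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Rules require an explicit container-level RestartPolicy, even if it
	// matches the pod-level policy.
	never := corev1.ContainerRestartPolicyNever
	c := corev1.Container{
		Name:          "worker",
		Image:         "busybox",
		RestartPolicy: &never,
		RestartPolicyRules: []corev1.ContainerRestartRule{{
			// Rules are evaluated in order; the first match wins.
			Action: corev1.ContainerRestartRuleActionRestart,
			ExitCodes: &corev1.ContainerRestartRuleOnExitCodes{
				Operator: corev1.ContainerRestartRuleOnExitCodesOpIn,
				Values:   []int32{42}, // restart only on this exit code
			},
		}},
	}
	fmt.Printf("%+v\n", c)
}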
// +optional @@ -3478,11 +3631,64 @@ const ( ) // ContainerRestartPolicy is the restart policy for a single container. -// This may only be set for init containers and only allowed value is "Always". +// The only allowed values are "Always", "Never", and "OnFailure". type ContainerRestartPolicy string const ( - ContainerRestartPolicyAlways ContainerRestartPolicy = "Always" + ContainerRestartPolicyAlways ContainerRestartPolicy = "Always" + ContainerRestartPolicyNever ContainerRestartPolicy = "Never" + ContainerRestartPolicyOnFailure ContainerRestartPolicy = "OnFailure" +) + +// ContainerRestartRule describes how a container exit is handled. +type ContainerRestartRule struct { + // Specifies the action taken on a container exit if the requirements + // are satisfied. The only possible value is "Restart" to restart the + // container. + // +required + Action ContainerRestartRuleAction `json:"action,omitempty" proto:"bytes,1,opt,name=action" protobuf:"bytes,1,opt,name=action,casttype=ContainerRestartRuleAction"` + + // Represents the exit codes to check on container exits. + // +optional + // +oneOf=when + ExitCodes *ContainerRestartRuleOnExitCodes `json:"exitCodes,omitempty" proto:"bytes,2,opt,name=exitCodes" protobuf:"bytes,2,opt,name=exitCodes"` +} + +// ContainerRestartRuleAction describes the action to take when the +// container exits. +type ContainerRestartRuleAction string + +// The only valid action is Restart. +const ( + ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart" +) + +// ContainerRestartRuleOnExitCodes describes the condition +// for handling an exited container based on its exit codes. +type ContainerRestartRuleOnExitCodes struct { + // Represents the relationship between the container exit code(s) and the + // specified values. Possible values are: + // - In: the requirement is satisfied if the container exit code is in the + // set of specified values. + // - NotIn: the requirement is satisfied if the container exit code is + // not in the set of specified values. + // +required + Operator ContainerRestartRuleOnExitCodesOperator `json:"operator,omitempty" proto:"bytes,1,opt,name=operator" protobuf:"bytes,1,opt,name=operator,casttype=ContainerRestartRuleOnExitCodesOperator"` + + // Specifies the set of values to check for container exit codes. + // At most 255 elements are allowed. + // +optional + // +listType=set + Values []int32 `json:"values,omitempty" proto:"varint,2,rep,name=values" protobuf:"varint,2,rep,name=values"` +} + +// ContainerRestartRuleOnExitCodesOperator describes the operator +// to take for the exit codes. +type ContainerRestartRuleOnExitCodesOperator string + +const ( + ContainerRestartRuleOnExitCodesOpIn ContainerRestartRuleOnExitCodesOperator = "In" + ContainerRestartRuleOnExitCodesOpNotIn ContainerRestartRuleOnExitCodesOperator = "NotIn" ) // DNSPolicy defines how a pod's DNS will be configured. @@ -3678,8 +3884,8 @@ type PodAntiAffinity struct { // most preferred is the one with the greatest sum of weights, i.e. 
// for each node that meets all of the scheduling requirements (resource // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // compute a sum by iterating through the elements of this field and subtracting + // "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional // +listType=atomic @@ -3806,7 +4012,6 @@ type Taint struct { // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. // +optional TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` } @@ -3983,7 +4188,9 @@ type PodSpec struct { // +optional NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. + // When using HostNetwork you should specify ports so the scheduler is aware. + // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, + // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. // Default to false. // +k8s:conversion-gen=false // +optional @@ -4126,6 +4333,7 @@ type PodSpec struct { // - spec.hostPID // - spec.hostIPC // - spec.hostUsers + // - spec.resources // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile @@ -4194,7 +4402,7 @@ type PodSpec struct { ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` // Resources is the total amount of CPU and Memory resources required by all // containers in the pod. It supports specifying Requests and Limits for - // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported. // // This field enables fine-grained control over resource allocation for the // entire pod, allowing resource sharing among containers in a pod. @@ -4206,6 +4414,20 @@ type PodSpec struct { // +featureGate=PodLevelResources // +optional Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"` + // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. + // This field only specifies the pod's hostname and does not affect its DNS records. + // When this field is set to a non-empty string: + // - It takes precedence over the values set in `hostname` and `subdomain`. + // - The Pod's hostname will be set to this value. + // - `setHostnameAsFQDN` must be nil or set to false. + // - `hostNetwork` must be set to false. + // + // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. + // Requires the HostnameOverride feature gate to be enabled. 
+	// +	// +featureGate=HostnameOverride +	// +optional +	HostnameOverride *string `json:"hostnameOverride,omitempty" protobuf:"bytes,41,opt,name=hostnameOverride"` } // PodResourceClaim references exactly one ResourceClaim, either directly @@ -4267,6 +4489,31 @@ type PodResourceClaimStatus struct { ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"` } +// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended +// resource requests backed by DRA. It stores the generated name for +// the corresponding special ResourceClaim created by the scheduler. +type PodExtendedResourceClaimStatus struct { +	// RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request +	// in the generated ResourceClaim. +	// +listType=atomic +	RequestMappings []ContainerExtendedResourceRequest `json:"requestMappings" protobuf:"bytes,1,rep,name=requestMappings"` + +	// ResourceClaimName is the name of the ResourceClaim that was +	// generated for the Pod in the namespace of the Pod. +	ResourceClaimName string `json:"resourceClaimName" protobuf:"bytes,2,name=resourceClaimName"` +} + +// ContainerExtendedResourceRequest has the mapping of container name, +// extended resource name to the device request name. +type ContainerExtendedResourceRequest struct { +	// The name of the container requesting resources. +	ContainerName string `json:"containerName" protobuf:"bytes,1,name=containerName"` +	// The name of the extended resource in that container which gets backed by DRA. +	ResourceName string `json:"resourceName" protobuf:"bytes,2,name=resourceName"` +	// The name of the request in the special ResourceClaim which corresponds to the extended resource. +	RequestName string `json:"requestName" protobuf:"bytes,3,name=requestName"` +} + // OSName is the set of OS'es that can be used in OS. type OSName string @@ -4799,8 +5046,8 @@ type EphemeralContainerCommon struct { // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. -	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys -	// will be reported as an event when the container is starting. When a key exists in multiple +	// The keys defined within a source may consist of any printable ASCII characters except '='. +	// When a key exists in multiple // sources, the value associated with the last source will take precedence. // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. @@ -4826,11 +5073,17 @@ ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Restart policy for the container to manage the restart behavior of each // container within a pod. -	// This may only be set for init containers. You cannot set this field on -	// ephemeral containers. +	// You cannot set this field on +	// ephemeral containers. // +featureGate=SidecarContainers // +optional RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` +	// Represents a list of rules to be checked to determine if the +	// container should be restarted on exit. You cannot set this field on +	// ephemeral containers.
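And for the hostnameOverride field completed above, a minimal sketch of a PodSpec that satisfies its contract, assuming the HostnameOverride feature gate is enabled; the hostname value is a placeholder and must be an RFC 1123 subdomain of at most 64 characters.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	hostname := "db-0.internal" // takes precedence over hostname and subdomain
	spec := corev1.PodSpec{
		Containers:  []corev1.Container{{Name: "app", Image: "busybox"}},
		HostNetwork: false, // hostNetwork must be false when overriding
		// setHostnameAsFQDN is left nil, as the field's contract requires.
		HostnameOverride: &hostname,
	}
	fmt.Println(*spec.HostnameOverride)
}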
+ // +featureGate=ContainerRestartRules + // +optional + // +listType=atomic + RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"` // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -5091,6 +5344,10 @@ type PodStatus struct { // +featureGate=DynamicResourceAllocation // +optional ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"` + // Status of extended resource claim backed by DRA. + // +featureGate=DRAExtendedResource + // +optional + ExtendedResourceClaimStatus *PodExtendedResourceClaimStatus `json:"extendedResourceClaimStatus,omitempty" protobuf:"bytes,18,opt,name=extendedResourceClaimStatus"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5311,6 +5568,7 @@ type ReplicationControllerCondition struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.0 +// +k8s:supportsSubresource=/scale // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { diff --git a/e2e/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 9e987eefdd3..1204307667f 100644 --- a/e2e/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -356,11 +356,12 @@ var map_Container = map[string]string{ "args": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", - "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", +	"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "resizePolicy": "Resources resize policy for the container.", -	"restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", +	"restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", +	"restartPolicyRules": "Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, the container MUST set RestartPolicy explicitly even if it matches the Pod's RestartPolicy.", "volumeMounts": "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -380,6 +381,17 @@ func (Container) SwaggerDoc() map[string]string { return map_Container } +var map_ContainerExtendedResourceRequest = map[string]string{ + "": "ContainerExtendedResourceRequest has the mapping of container name, extended resource name to the device request name.", + "containerName": "The name of the container requesting resources.", + "resourceName": "The name of the extended resource in that container which gets backed by DRA.", + "requestName": "The name of the request in the special ResourceClaim which corresponds to the extended resource.", +} + +func (ContainerExtendedResourceRequest) SwaggerDoc() map[string]string { + return map_ContainerExtendedResourceRequest +} + var map_ContainerImage = map[string]string{ "": "Describe a container image", "names": "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]", @@ -413,6 +425,26 @@ func (ContainerResizePolicy) SwaggerDoc() map[string]string { return map_ContainerResizePolicy } +var map_ContainerRestartRule = map[string]string{ + "": "ContainerRestartRule describes how a container exit is handled.", + "action": "Specifies the action taken on a container exit if the requirements are satisfied. The only possible value is \"Restart\" to restart the container.", + "exitCodes": "Represents the exit codes to check on container exits.", +} + +func (ContainerRestartRule) SwaggerDoc() map[string]string { + return map_ContainerRestartRule +} + +var map_ContainerRestartRuleOnExitCodes = map[string]string{ + "": "ContainerRestartRuleOnExitCodes describes the condition for handling an exited container based on its exit codes.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Possible values are: - In: the requirement is satisfied if the container exit code is in the\n set of specified values.\n- NotIn: the requirement is satisfied if the container exit code is\n not in the set of specified values.", + "values": "Specifies the set of values to check for container exit codes. At most 255 elements are allowed.", +} + +func (ContainerRestartRuleOnExitCodes) SwaggerDoc() map[string]string { + return map_ContainerRestartRuleOnExitCodes +} + var map_ContainerState = map[string]string{ "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", "waiting": "Details about a waiting container", @@ -597,7 +629,7 @@ func (EndpointsList) SwaggerDoc() map[string]string { var map_EnvFromSource = map[string]string{ "": "EnvFromSource represents the source of a set of ConfigMaps or Secrets", - "prefix": "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.", + "prefix": "Optional text to prepend to the name of each environment variable. 
May consist of any printable ASCII characters except '='.", "configMapRef": "The ConfigMap to select from", "secretRef": "The Secret to select from", } @@ -608,7 +640,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string { var map_EnvVar = map[string]string{ "": "EnvVar represents an environment variable present in a Container.", - "name": "Name of the environment variable. Must be a C_IDENTIFIER.", + "name": "Name of the environment variable. May consist of any printable ASCII characters except '='.", "value": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", "valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.", } @@ -623,6 +655,7 @@ var map_EnvVarSource = map[string]string{ "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", "configMapKeyRef": "Selects a key of a ConfigMap.", "secretKeyRef": "Selects a key of a secret in the pod's namespace", + "fileKeyRef": "FileKeyRef selects a key of the env file. Requires the EnvFiles feature gate to be enabled.", } func (EnvVarSource) SwaggerDoc() map[string]string { @@ -646,11 +679,12 @@ var map_EphemeralContainerCommon = map[string]string{ "args": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "Ports are not allowed for ephemeral containers.", - "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. 
Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", "resizePolicy": "Resources resize policy for the container.", - "restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.", + "restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. You cannot set this field on ephemeral containers.", + "restartPolicyRules": "Represents a list of rules to be checked to determine if the container should be restarted on exit. You cannot set this field on ephemeral containers.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", @@ -754,6 +788,18 @@ func (FCVolumeSource) SwaggerDoc() map[string]string { return map_FCVolumeSource } +var map_FileKeySelector = map[string]string{ + "": "FileKeySelector selects a key of the env file.", + "volumeName": "The name of the volume mount containing the env file.", + "path": "The path within the volume from which to select the file. Must be relative and may not contain the '..' path or start with '..'.", + "key": "The key within the env file. An invalid key will prevent the pod from starting. The keys defined within a source may consist of any printable ASCII characters except '='. During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.", + "optional": "Specify whether the file or its key must be defined. If the file or key does not exist, then the env var is not published. If optional is set to true and the specified key does not exist, the environment variable will not be set in the Pod's containers.\n\nIf optional is set to false and the specified key does not exist, an error will be returned during Pod creation.", +} + +func (FileKeySelector) SwaggerDoc() map[string]string { + return map_FileKeySelector +} + var map_FlexPersistentVolumeSource = map[string]string{ "": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", "driver": "driver is the name of the driver to use for this volume.", @@ -837,7 +883,7 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "endpoints is the endpoint name that details Glusterfs topology.", "path": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "readOnly": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } @@ -1446,7 +1492,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeMode": "volumeMode defines what type of volume is required by the claim. 
Value of Filesystem is implied when not included in claim spec.", "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", - "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string or nil value indicates that no VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { return map_PersistentVolumeClaimSpec } @@ -1461,8 +1507,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{ "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.", "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", -	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", -	"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", +	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim.", +	"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1539,7 +1585,7 @@ var map_PersistentVolumeSpec = map[string]string{ "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. 
Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", - "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1606,7 +1652,7 @@ func (PodAffinityTerm) SwaggerDoc() map[string]string { var map_PodAntiAffinity = map[string]string{ "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", "requiredDuringSchedulingIgnoredDuringExecution": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and subtracting \"weight\" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", } func (PodAntiAffinity) SwaggerDoc() map[string]string { @@ -1626,6 +1672,20 @@ func (PodAttachOptions) SwaggerDoc() map[string]string { return map_PodAttachOptions } +var map_PodCertificateProjection = map[string]string{ + "": "PodCertificateProjection provides a private key and X.509 certificate in the pod filesystem.", + "signerName": "Kubelet's generated CSRs will be addressed to this signer.", + "keyType": "The type of keypair Kubelet will generate for the pod.\n\nValid values are \"RSA3072\", \"RSA4096\", \"ECDSAP256\", \"ECDSAP384\", \"ECDSAP521\", and \"ED25519\".", + "maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nKubelet copies this value verbatim into the PodCertificateRequests it generates for this projection.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.", + "credentialBundlePath": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain. If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.", + "keyPath": "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.", + "certificateChainPath": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.", +} + +func (PodCertificateProjection) SwaggerDoc() map[string]string { + return map_PodCertificateProjection +} + var map_PodCondition = map[string]string{ "": "PodCondition contains details for the current condition of this pod.", "type": "Type is the type of the condition. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", @@ -1676,6 +1736,16 @@ func (PodExecOptions) SwaggerDoc() map[string]string { return map_PodExecOptions } +var map_PodExtendedResourceClaimStatus = map[string]string{ +	"":                  "PodExtendedResourceClaimStatus is stored in the PodStatus for the extended resource requests backed by DRA. It stores the generated name for the corresponding special ResourceClaim created by the scheduler.", +	"requestMappings":   "RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request in the generated ResourceClaim.", +	"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod.", +} + +func (PodExtendedResourceClaimStatus) SwaggerDoc() map[string]string { return map_PodExtendedResourceClaimStatus +} + var map_PodIP = map[string]string{ "":   "PodIP represents a single IP address allocated to the pod.", "ip": "IP is the IP address assigned to the pod", @@ -1824,7 +1894,7 @@ var map_PodSpec = map[string]string{ "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", "nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename", -	"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", +	"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", @@ -1846,11 +1916,12 @@ var map_PodSpec = map[string]string{ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", - "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. 
If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", - "resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.", + "resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\", \"memory\" and \"hugepages-\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.", + "hostnameOverride": "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1858,24 +1929,25 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. 
The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod", - "hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.", - "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. 
Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status", - "containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", - "ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.", - "resourceClaimStatuses": "Status of resource claims.", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. 
Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod", + "hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.", + "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status", + "containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. 
If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", + "ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.", + "resourceClaimStatuses": "Status of resource claims.", + "extendedResourceClaimStatus": "Status of extended resource claim backed by DRA.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -2205,7 +2277,7 @@ var map_ResourceRequirements = map[string]string{ "": "ResourceRequirements describes the compute resource requirements.", "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - "claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", + "claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis field depends on the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", } func (ResourceRequirements) SwaggerDoc() map[string]string { @@ -2587,7 +2659,7 @@ var map_Taint = map[string]string{ "key": "Required. The taint key to be applied to a node.", "value": "The taint value corresponding to the taint key.", "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", - "timeAdded": "TimeAdded represents the time at which the taint was added. 
It is only written for NoExecute taints.",
+	"timeAdded": "TimeAdded represents the time at which the taint was added.",
 }
 
 func (Taint) SwaggerDoc() map[string]string {
@@ -2727,6 +2799,7 @@ var map_VolumeProjection = map[string]string{
 	"configMap": "configMap information about the configMap data to project",
 	"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
 	"clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
+	"podCertificate": "Projects an auto-rotating credential bundle (private key and certificate chain) that the pod can use either as a TLS client or server.\n\nKubelet generates a private key and uses it to send a PodCertificateRequest to the named signer. Once the signer approves the request and issues a certificate chain, Kubelet writes the key and certificate chain to the pod filesystem. The pod does not start until certificates have been issued for each podCertificate projected volume source in its spec.\n\nKubelet will begin trying to rotate the certificate at the time indicated by the signer using the PodCertificateRequest.Status.BeginRefreshAt timestamp.\n\nKubelet can write a single file, indicated by the credentialBundlePath field, or separate files, indicated by the keyPath and certificateChainPath fields.\n\nThe credential bundle is a single file in PEM format. The first PEM entry is the private key (in PKCS#8 format), and the remaining PEM entries are the certificate chain issued by the signer (typically, signers will return their certificate chain in leaf-to-root order).\n\nPrefer using the credential bundle format, since your application code can read it atomically. If you use keyPath and certificateChainPath, your application must make two separate file reads. If these coincide with a certificate rotation, it is possible that the private key and leaf certificate you read may not correspond to each other. Your application will need to check for this condition, and re-read until they are consistent.\n\nThe named signer chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to use the certificates it issues.",
 }
 
 func (VolumeProjection) SwaggerDoc() map[string]string {
@@ -2752,10 +2825,10 @@ var map_VolumeSource = map[string]string{
 	"gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
 	"secret": "secret represents a secret that should populate this volume. 
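The podCertificate documentation above fully specifies the on-disk bundle layout, so a consumer sketch may help. This is hypothetical code, not from this patch: it assumes only what the doc string states (the first PEM block is a PKCS#8 private key, the remaining blocks are the certificate chain, typically leaf first) and uses the Go standard library.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// loadCredentialBundle reads a credentialBundlePath-style file in one shot,
// which is the atomicity advantage the doc string recommends.
func loadCredentialBundle(path string) (key any, chain []*x509.Certificate, err error) {
	data, err := os.ReadFile(path) // single read keeps key and chain consistent
	if err != nil {
		return nil, nil, err
	}
	block, rest := pem.Decode(data)
	if block == nil {
		return nil, nil, fmt.Errorf("no PEM data in %s", path)
	}
	key, err = x509.ParsePKCS8PrivateKey(block.Bytes) // first entry: the private key
	if err != nil {
		return nil, nil, err
	}
	for {
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		cert, cerr := x509.ParseCertificate(block.Bytes) // remaining entries: chain, leaf-to-root
		if cerr != nil {
			return nil, nil, cerr
		}
		chain = append(chain, cert)
	}
	return key, chain, nil
}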
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", - "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi", + "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.", "persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.", "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", diff --git a/e2e/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 619c525427e..bcd91bd0190 100644 --- a/e2e/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/e2e/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -829,6 +829,13 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(ContainerRestartPolicy) **out = **in } + if in.RestartPolicyRules != nil { + in, out := &in.RestartPolicyRules, &out.RestartPolicyRules + *out = make([]ContainerRestartRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -879,6 +886,22 @@ func (in *Container) DeepCopy() *Container { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerExtendedResourceRequest) DeepCopyInto(out *ContainerExtendedResourceRequest) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerExtendedResourceRequest. 
+func (in *ContainerExtendedResourceRequest) DeepCopy() *ContainerExtendedResourceRequest { + if in == nil { + return nil + } + out := new(ContainerExtendedResourceRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { *out = *in @@ -932,6 +955,48 @@ func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRestartRule) DeepCopyInto(out *ContainerRestartRule) { + *out = *in + if in.ExitCodes != nil { + in, out := &in.ExitCodes, &out.ExitCodes + *out = new(ContainerRestartRuleOnExitCodes) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRule. +func (in *ContainerRestartRule) DeepCopy() *ContainerRestartRule { + if in == nil { + return nil + } + out := new(ContainerRestartRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRestartRuleOnExitCodes) DeepCopyInto(out *ContainerRestartRuleOnExitCodes) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRuleOnExitCodes. +func (in *ContainerRestartRuleOnExitCodes) DeepCopy() *ContainerRestartRuleOnExitCodes { + if in == nil { + return nil + } + out := new(ContainerRestartRuleOnExitCodes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in @@ -1433,6 +1498,11 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { *out = new(SecretKeySelector) (*in).DeepCopyInto(*out) } + if in.FileKeyRef != nil { + in, out := &in.FileKeyRef, &out.FileKeyRef + *out = new(FileKeySelector) + (*in).DeepCopyInto(*out) + } return } @@ -1506,6 +1576,13 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) *out = new(ContainerRestartPolicy) **out = **in } + if in.RestartPolicyRules != nil { + in, out := &in.RestartPolicyRules, &out.RestartPolicyRules + *out = make([]ContainerRestartRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -1736,6 +1813,27 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileKeySelector) DeepCopyInto(out *FileKeySelector) { + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileKeySelector. 
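As a reminder of why these generated helpers exist, here is a toy illustration, not part of the patch and deliberately using stand-in types rather than the real API: a plain Go struct copy shares slice and pointer fields, which the generated DeepCopyInto functions avoid by allocating fresh memory, exactly as the RestartPolicyRules and ExitCodes.Values copies above do.

package main

import "fmt"

type exitCodes struct{ Values []int32 }
type restartRule struct{ ExitCodes *exitCodes }
type container struct{ Rules []restartRule }

// deepCopy mirrors the generated pattern: a new slice, new pointer targets
// and copied scalars, so the result shares no memory with the receiver.
func (in *container) deepCopy() *container {
	out := &container{Rules: make([]restartRule, len(in.Rules))}
	for i, r := range in.Rules {
		if r.ExitCodes != nil {
			out.Rules[i].ExitCodes = &exitCodes{Values: append([]int32(nil), r.ExitCodes.Values...)}
		}
	}
	return out
}

func main() {
	a := container{Rules: []restartRule{{ExitCodes: &exitCodes{Values: []int32{42}}}}}
	shallow := a // struct copy: still aliases a's slice and pointer
	deep := a.deepCopy()
	a.Rules[0].ExitCodes.Values[0] = 1
	fmt.Println(shallow.Rules[0].ExitCodes.Values[0], deep.Rules[0].ExitCodes.Values[0]) // prints: 1 42
}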
+func (in *FileKeySelector) DeepCopy() *FileKeySelector { + if in == nil { + return nil + } + out := new(FileKeySelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { *out = *in @@ -3797,6 +3895,27 @@ func (in *PodAttachOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection) { + *out = *in + if in.MaxExpirationSeconds != nil { + in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateProjection. +func (in *PodCertificateProjection) DeepCopy() *PodCertificateProjection { + if in == nil { + return nil + } + out := new(PodCertificateProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodCondition) DeepCopyInto(out *PodCondition) { *out = *in @@ -3899,6 +4018,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodExtendedResourceClaimStatus) DeepCopyInto(out *PodExtendedResourceClaimStatus) { + *out = *in + if in.RequestMappings != nil { + in, out := &in.RequestMappings, &out.RequestMappings + *out = make([]ContainerExtendedResourceRequest, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExtendedResourceClaimStatus. +func (in *PodExtendedResourceClaimStatus) DeepCopy() *PodExtendedResourceClaimStatus { + if in == nil { + return nil + } + out := new(PodExtendedResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodIP) DeepCopyInto(out *PodIP) { *out = *in @@ -4412,6 +4552,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(ResourceRequirements) (*in).DeepCopyInto(*out) } + if in.HostnameOverride != nil { + in, out := &in.HostnameOverride, &out.HostnameOverride + *out = new(string) + **out = **in + } return } @@ -4477,6 +4622,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ExtendedResourceClaimStatus != nil { + in, out := &in.ExtendedResourceClaimStatus, &out.ExtendedResourceClaimStatus + *out = new(PodExtendedResourceClaimStatus) + (*in).DeepCopyInto(*out) + } return } @@ -6412,6 +6562,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = new(ClusterTrustBundleProjection) (*in).DeepCopyInto(*out) } + if in.PodCertificate != nil { + in, out := &in.PodCertificate, &out.PodCertificate + *out = new(PodCertificateProjection) + (*in).DeepCopyInto(*out) + } return } diff --git a/e2e/vendor/k8s.io/api/extensions/v1beta1/doc.go b/e2e/vendor/k8s.io/api/extensions/v1beta1/doc.go index 7770fab5d21..be710973cb9 100644 --- a/e2e/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/e2e/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -18,5 +18,7 @@ limitations under the License. 
// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true +// +k8s:validation-gen=TypeMeta +// +k8s:validation-gen-input=k8s.io/api/extensions/v1beta1 package v1beta1 diff --git a/e2e/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/e2e/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 70fcec0cc57..fed0b4835d6 100644 --- a/e2e/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/e2e/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -980,7 +980,7 @@ message RollingUpdateDaemonSet { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may @@ -1039,6 +1039,9 @@ message Scale { message ScaleSpec { // desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 replicas = 1; } diff --git a/e2e/vendor/k8s.io/api/extensions/v1beta1/types.go b/e2e/vendor/k8s.io/api/extensions/v1beta1/types.go index b80a7a7e16b..c7b50e0590c 100644 --- a/e2e/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/e2e/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -27,6 +27,9 @@ import ( type ScaleSpec struct { // desired number of instances for the scaled object. // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } @@ -54,6 +57,7 @@ type ScaleStatus struct { // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.2 // +k8s:prerelease-lifecycle-gen:removed=1.16 +// +k8s:isSubresource=/scale // represents a scaling request for a resource. type Scale struct { @@ -398,7 +402,7 @@ type RollingUpdateDaemonSet struct { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/e2e/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 923fab3aa1d..8a158233efd 100644 --- a/e2e/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -482,7 +482,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. 
Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
-	"maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
+	"maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. 
This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { diff --git a/e2e/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go b/e2e/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go new file mode 100644 index 00000000000..6d2a1666ae3 --- /dev/null +++ b/e2e/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go @@ -0,0 +1,78 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by validation-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + fmt "fmt" + + operation "k8s.io/apimachinery/pkg/api/operation" + safe "k8s.io/apimachinery/pkg/api/safe" + validate "k8s.io/apimachinery/pkg/api/validate" + runtime "k8s.io/apimachinery/pkg/runtime" + field "k8s.io/apimachinery/pkg/util/validation/field" +) + +func init() { localSchemeBuilder.Register(RegisterValidations) } + +// RegisterValidations adds validation functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterValidations(scheme *runtime.Scheme) error { + scheme.AddValidationFunc((*Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList { + switch op.Request.SubresourcePath() { + case "/scale": + return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*Scale), safe.Cast[*Scale](oldObj)) + } + return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))} + }) + return nil +} + +func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *Scale) (errs field.ErrorList) { + // field Scale.TypeMeta has no validation + // field Scale.ObjectMeta has no validation + + // field Scale.Spec + errs = append(errs, + func(fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) { + errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...) + return + }(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *Scale) *ScaleSpec { return &oldObj.Spec }))...) + + // field Scale.Status has no validation + return errs +} + +func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) { + // field ScaleSpec.Replicas + errs = append(errs, + func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) { + // optional value-type fields with zero-value defaults are purely documentation + if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) { + return nil // no changes + } + errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...) + return + }(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *ScaleSpec) *int32 { return &oldObj.Replicas }))...) 
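+	// Editorial note, not generator output: the equality guard above is
+	// validation "ratcheting". On update, an unchanged replicas value skips
+	// re-validation, so objects created before the +k8s:minimum=0 tag was
+	// added keep working until the field is actually modified.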
+ + return errs +} diff --git a/e2e/vendor/k8s.io/api/networking/v1/generated.proto b/e2e/vendor/k8s.io/api/networking/v1/generated.proto index e3e3e9215e1..16a2792aa64 100644 --- a/e2e/vendor/k8s.io/api/networking/v1/generated.proto +++ b/e2e/vendor/k8s.io/api/networking/v1/generated.proto @@ -534,11 +534,12 @@ message NetworkPolicyPort { // NetworkPolicySpec provides the specification of a NetworkPolicy message NetworkPolicySpec { // podSelector selects the pods to which this NetworkPolicy object applies. - // The array of ingress rules is applied to any pods selected by this field. + // The array of rules is applied to any pods selected by this field. An empty + // selector matches all pods in the policy's namespace. // Multiple network policies can select the same set of pods. In this case, // the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. + // This field is optional. If it is not specified, it defaults to an empty selector. + // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // ingress is a list of ingress rules to be applied to the selected pods. diff --git a/e2e/vendor/k8s.io/api/networking/v1/types.go b/e2e/vendor/k8s.io/api/networking/v1/types.go index 216647ceeb4..7d9a4fc94cb 100644 --- a/e2e/vendor/k8s.io/api/networking/v1/types.go +++ b/e2e/vendor/k8s.io/api/networking/v1/types.go @@ -60,11 +60,12 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { // podSelector selects the pods to which this NetworkPolicy object applies. - // The array of ingress rules is applied to any pods selected by this field. + // The array of rules is applied to any pods selected by this field. An empty + // selector matches all pods in the policy's namespace. // Multiple network policies can select the same set of pods. In this case, // the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. + // This field is optional. If it is not specified, it defaults to an empty selector. + // +optional PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` // ingress is a list of ingress rules to be applied to the selected pods. diff --git a/e2e/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 0e294848bab..6210bb7a5aa 100644 --- a/e2e/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/e2e/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -313,7 +313,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. 
The array of rules is applied to any pods selected by this field. An empty selector matches all pods in the policy's namespace. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is optional. If it is not specified, it defaults to an empty selector.", "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/e2e/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go deleted file mode 100644 index 0d42034837f..00000000000 --- a/e2e/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1929 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/api/networking/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. 
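To make the relaxed podSelector semantics concrete, here is a short sketch, not part of this patch, assuming the vendored k8s.io/api/networking/v1 types: leaving PodSelector at its zero value now means "all pods in the namespace", which is exactly what a default-deny policy wants.

package main

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// denyAllIngress selects every pod in the namespace via the (now optional)
// empty podSelector and, by declaring Ingress with no rules, blocks all
// incoming traffic to those pods.
func denyAllIngress(namespace string) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "default-deny-ingress", Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			// PodSelector intentionally left at its zero value: matches all pods.
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			// No Ingress rules listed, so selected pods accept no traffic.
		},
	}
}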
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *IPAddress) Reset() { *m = IPAddress{} } -func (*IPAddress) ProtoMessage() {} -func (*IPAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{0} -} -func (m *IPAddress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddress.Merge(m, src) -} -func (m *IPAddress) XXX_Size() int { - return m.Size() -} -func (m *IPAddress) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddress.DiscardUnknown(m) -} - -var xxx_messageInfo_IPAddress proto.InternalMessageInfo - -func (m *IPAddressList) Reset() { *m = IPAddressList{} } -func (*IPAddressList) ProtoMessage() {} -func (*IPAddressList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{1} -} -func (m *IPAddressList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPAddressList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressList.Merge(m, src) -} -func (m *IPAddressList) XXX_Size() int { - return m.Size() -} -func (m *IPAddressList) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressList.DiscardUnknown(m) -} - -var xxx_messageInfo_IPAddressList proto.InternalMessageInfo - -func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } -func (*IPAddressSpec) ProtoMessage() {} -func (*IPAddressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{2} -} -func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPAddressSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressSpec.Merge(m, src) -} -func (m *IPAddressSpec) XXX_Size() int { - return m.Size() -} -func (m *IPAddressSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo - -func (m *ParentReference) Reset() { *m = ParentReference{} } -func (*ParentReference) ProtoMessage() {} -func (*ParentReference) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{3} -} -func (m *ParentReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ParentReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParentReference.Merge(m, src) -} -func (m *ParentReference) XXX_Size() int { - return m.Size() -} -func (m *ParentReference) 
XXX_DiscardUnknown() { - xxx_messageInfo_ParentReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ParentReference proto.InternalMessageInfo - -func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } -func (*ServiceCIDR) ProtoMessage() {} -func (*ServiceCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{4} -} -func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDR.Merge(m, src) -} -func (m *ServiceCIDR) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo - -func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } -func (*ServiceCIDRList) ProtoMessage() {} -func (*ServiceCIDRList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{5} -} -func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRList.Merge(m, src) -} -func (m *ServiceCIDRList) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDRList) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo - -func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } -func (*ServiceCIDRSpec) ProtoMessage() {} -func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{6} -} -func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) -} -func (m *ServiceCIDRSpec) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo - -func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } -func (*ServiceCIDRStatus) ProtoMessage() {} -func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c1cb39e7b48ce50d, []int{7} -} -func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) -} -func (m *ServiceCIDRStatus) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo - -func init() { - 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/e2e/vendor/k8s.io/api/networking/v1alpha1/generated.proto deleted file mode 100644 index 80ec6af735e..00000000000 --- a/e2e/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.networking.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/networking/v1alpha1"; - -// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs -// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. -// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, -// the name of the object is the IP address in canonical format, four decimal digits separated -// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. -// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 -// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 -message IPAddress { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the IPAddress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional IPAddressSpec spec = 2; -} - -// IPAddressList contains a list of IPAddress. -message IPAddressList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of IPAddresses. - repeated IPAddress items = 2; -} - -// IPAddressSpec describe the attributes in an IP Address. -message IPAddressSpec { - // ParentRef references the resource that an IPAddress is attached to. - // An IPAddress must reference a parent object. - // +required - optional ParentReference parentRef = 1; -} - -// ParentReference describes a reference to a parent object. -message ParentReference { - // Group is the group of the object being referenced. - // +optional - optional string group = 1; - - // Resource is the resource of the object being referenced. 
- // +required - optional string resource = 2; - - // Namespace is the namespace of the object being referenced. - // +optional - optional string namespace = 3; - - // Name is the name of the object being referenced. - // +required - optional string name = 4; -} - -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). -// This range is used to allocate ClusterIPs to Service objects. -message ServiceCIDR { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ServiceCIDRSpec spec = 2; - - // status represents the current state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ServiceCIDRStatus status = 3; -} - -// ServiceCIDRList contains a list of ServiceCIDR objects. -message ServiceCIDRList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of ServiceCIDRs. - repeated ServiceCIDR items = 2; -} - -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. -message ServiceCIDRSpec { - // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") - // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. - // The network address of each CIDR, the address that identifies the subnet of a host, is reserved - // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be - // allocated. - // This field is immutable. - // +optional - // +listType=atomic - repeated string cidrs = 1; -} - -// ServiceCIDRStatus describes the current state of the ServiceCIDR. -message ServiceCIDRStatus { - // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; -} - diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/types.go b/e2e/vendor/k8s.io/api/networking/v1alpha1/types.go deleted file mode 100644 index 0e454f02635..00000000000 --- a/e2e/vendor/k8s.io/api/networking/v1alpha1/types.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs -// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. -// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, -// the name of the object is the IP address in canonical format, four decimal digits separated -// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. -// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 -// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 -type IPAddress struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // spec is the desired state of the IPAddress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// IPAddressSpec describe the attributes in an IP Address. -type IPAddressSpec struct { - // ParentRef references the resource that an IPAddress is attached to. - // An IPAddress must reference a parent object. - // +required - ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` -} - -// ParentReference describes a reference to a parent object. -type ParentReference struct { - // Group is the group of the object being referenced. - // +optional - Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` - // Resource is the resource of the object being referenced. - // +required - Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` - // Namespace is the namespace of the object being referenced. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` - // Name is the name of the object being referenced. - // +required - Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// IPAddressList contains a list of IPAddress. -type IPAddressList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // items is the list of IPAddresses. - Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). -// This range is used to allocate ClusterIPs to Service objects. -type ServiceCIDR struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // spec is the desired state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // status represents the current state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. -type ServiceCIDRSpec struct { - // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") - // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. - // The network address of each CIDR, the address that identifies the subnet of a host, is reserved - // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be - // allocated. - // This field is immutable. - // +optional - // +listType=atomic - CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` -} - -const ( - // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the - // apiserver to allocate ClusterIPs for Services. - ServiceCIDRConditionReady = "Ready" - // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is - // being deleted. - ServiceCIDRReasonTerminating = "Terminating" -) - -// ServiceCIDRStatus describes the current state of the ServiceCIDR. -type ServiceCIDRStatus struct { - // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// ServiceCIDRList contains a list of ServiceCIDR objects. -type ServiceCIDRList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // items is the list of ServiceCIDRs. - Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/e2e/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 4c8eb57a7a5..00000000000 --- a/e2e/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_IPAddress = map[string]string{ - "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (IPAddress) SwaggerDoc() map[string]string { - return map_IPAddress -} - -var map_IPAddressList = map[string]string{ - "": "IPAddressList contains a list of IPAddress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of IPAddresses.", -} - -func (IPAddressList) SwaggerDoc() map[string]string { - return map_IPAddressList -} - -var map_IPAddressSpec = map[string]string{ - "": "IPAddressSpec describe the attributes in an IP Address.", - "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", -} - -func (IPAddressSpec) SwaggerDoc() map[string]string { - return map_IPAddressSpec -} - -var map_ParentReference = map[string]string{ - "": "ParentReference describes a reference to a parent object.", - "group": "Group is the group of the object being referenced.", - "resource": "Resource is the resource of the object being referenced.", - "namespace": "Namespace is the namespace of the object being referenced.", - "name": "Name is the name of the object being referenced.", -} - -func (ParentReference) SwaggerDoc() map[string]string { - return map_ParentReference -} - -var map_ServiceCIDR = map[string]string{ - "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (ServiceCIDR) SwaggerDoc() map[string]string { - return map_ServiceCIDR -} - -var map_ServiceCIDRList = map[string]string{ - "": "ServiceCIDRList contains a list of ServiceCIDR objects.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of ServiceCIDRs.", -} - -func (ServiceCIDRList) SwaggerDoc() map[string]string { - return map_ServiceCIDRList -} - -var map_ServiceCIDRSpec = map[string]string{ - "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", - "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.", -} - -func (ServiceCIDRSpec) SwaggerDoc() map[string]string { - return map_ServiceCIDRSpec -} - -var map_ServiceCIDRStatus = map[string]string{ - "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", - "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", -} - -func (ServiceCIDRStatus) SwaggerDoc() map[string]string { - return map_ServiceCIDRStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/e2e/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go deleted file mode 100644 index 5f9c23f708c..00000000000 --- a/e2e/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -const ( - - // TODO: Use IPFamily as field with a field selector,And the value is set based on - // the name at create time and immutable. - // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. - // This label simplify dual-stack client operations allowing to obtain the list of - // IP addresses filtered by family. - LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" - // LabelManagedBy is used to indicate the controller or entity that manages - // an IPAddress. 
This label aims to enable different IPAddress - // objects to be managed by different controllers or entities within the - // same cluster. It is highly recommended to configure this label for all - // IPAddress objects. - LabelManagedBy = "ipaddress.kubernetes.io/managed-by" -) diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 5c8f697ba36..00000000000 --- a/e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,229 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddress) DeepCopyInto(out *IPAddress) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. -func (in *IPAddress) DeepCopy() *IPAddress { - if in == nil { - return nil - } - out := new(IPAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddress) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]IPAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. -func (in *IPAddressList) DeepCopy() *IPAddressList { - if in == nil { - return nil - } - out := new(IPAddressList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddressList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { - *out = *in - if in.ParentRef != nil { - in, out := &in.ParentRef, &out.ParentRef - *out = new(ParentReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. -func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { - if in == nil { - return nil - } - out := new(IPAddressSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ParentReference) DeepCopyInto(out *ParentReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. -func (in *ParentReference) DeepCopy() *ParentReference { - if in == nil { - return nil - } - out := new(ParentReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. -func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { - if in == nil { - return nil - } - out := new(ServiceCIDR) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCIDR) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceCIDR, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. -func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { - if in == nil { - return nil - } - out := new(ServiceCIDRList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { - *out = *in - if in.CIDRs != nil { - in, out := &in.CIDRs, &out.CIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. -func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { - if in == nil { - return nil - } - out := new(ServiceCIDRSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. -func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { - if in == nil { - return nil - } - out := new(ServiceCIDRStatus) - in.DeepCopyInto(out) - return out -} diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index 714e7b6253b..00000000000 --- a/e2e/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,94 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1alpha1 - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { - return 1, 27 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { - return 1, 30 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *IPAddress) APILifecycleRemoved() (major, minor int) { - return 1, 33 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { - return 1, 27 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
-func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) {
-	return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *IPAddressList) APILifecycleRemoved() (major, minor int) {
-	return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
-	return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) {
-	return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) {
-	return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
-	return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) {
-	return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) {
-	return 1, 33
-}
diff --git a/e2e/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/e2e/vendor/k8s.io/api/resource/v1/devicetaint.go
similarity index 50%
rename from e2e/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go
rename to e2e/vendor/k8s.io/api/resource/v1/devicetaint.go
index fc7316521bf..a5c2e20a6e2 100644
--- a/e2e/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go
+++ b/e2e/vendor/k8s.io/api/resource/v1/devicetaint.go
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,14 +14,22 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by lister-gen. DO NOT EDIT.
+package v1
 
-package v1alpha1
+import "fmt"
 
-// IPAddressListerExpansion allows custom methods to be added to
-// IPAddressLister.
-type IPAddressListerExpansion interface{}
+var _ fmt.Stringer = DeviceTaint{}
 
-// ServiceCIDRListerExpansion allows custom methods to be added to
-// ServiceCIDRLister.
-type ServiceCIDRListerExpansion interface{}
+// String converts to a string in the format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
+func (t DeviceTaint) String() string {
+	if len(t.Effect) == 0 {
+		if len(t.Value) == 0 {
+			return fmt.Sprintf("%v", t.Key)
+		}
+		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
+	}
+	if len(t.Value) == 0 {
+		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
diff --git a/e2e/vendor/k8s.io/api/networking/v1alpha1/doc.go b/e2e/vendor/k8s.io/api/resource/v1/doc.go
similarity index 83%
rename from e2e/vendor/k8s.io/api/networking/v1alpha1/doc.go
rename to e2e/vendor/k8s.io/api/resource/v1/doc.go
index 55264ae7075..c94ca75ddc3 100644
--- a/e2e/vendor/k8s.io/api/networking/v1alpha1/doc.go
+++ b/e2e/vendor/k8s.io/api/resource/v1/doc.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+// +k8s:openapi-gen=true
 // +k8s:deepcopy-gen=package
 // +k8s:protobuf-gen=package
-// +k8s:openapi-gen=true
 // +k8s:prerelease-lifecycle-gen=true
-// +groupName=networking.k8s.io
+// +groupName=resource.k8s.io
 
-package v1alpha1
+// Package v1 is the v1 version of the resource API.
+package v1
diff --git a/e2e/vendor/k8s.io/api/resource/v1/generated.pb.go b/e2e/vendor/k8s.io/api/resource/v1/generated.pb.go
new file mode 100644
index 00000000000..5695e2c7e04
--- /dev/null
+++ b/e2e/vendor/k8s.io/api/resource/v1/generated.pb.go
@@ -0,0 +1,12777 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/resource/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	v11 "k8s.io/api/core/v1"
+	resource "k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} } +func (*AllocatedDeviceStatus) ProtoMessage() {} +func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{0} +} +func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src) +} +func (m *AllocatedDeviceStatus) XXX_Size() int { + return m.Size() +} +func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{1} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{2} +} +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) +} +func (m *CELDeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo + +func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} } +func (*CapacityRequestPolicy) ProtoMessage() {} +func (*CapacityRequestPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{3} +} +func (m *CapacityRequestPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CapacityRequestPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *CapacityRequestPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRequestPolicy.Merge(m, src) +} +func (m *CapacityRequestPolicy) XXX_Size() int { + return m.Size() +} +func (m *CapacityRequestPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRequestPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRequestPolicy proto.InternalMessageInfo + +func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} } +func (*CapacityRequestPolicyRange) ProtoMessage() {} +func (*CapacityRequestPolicyRange) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{4} +} +func (m *CapacityRequestPolicyRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CapacityRequestPolicyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CapacityRequestPolicyRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRequestPolicyRange.Merge(m, src) +} +func (m *CapacityRequestPolicyRange) XXX_Size() int { + return m.Size() +} +func (m *CapacityRequestPolicyRange) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRequestPolicyRange.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRequestPolicyRange proto.InternalMessageInfo + +func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} } +func (*CapacityRequirements) ProtoMessage() {} +func (*CapacityRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{5} +} +func (m *CapacityRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CapacityRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CapacityRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRequirements.Merge(m, src) +} +func (m *CapacityRequirements) XXX_Size() int { + return m.Size() +} +func (m *CapacityRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRequirements.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRequirements proto.InternalMessageInfo + +func (m *Counter) Reset() { *m = Counter{} } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{6} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return m.Size() +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *CounterSet) Reset() { *m = CounterSet{} } +func (*CounterSet) ProtoMessage() {} +func (*CounterSet) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{7} +} +func (m *CounterSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} 
+func (m *CounterSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_CounterSet.Merge(m, src) +} +func (m *CounterSet) XXX_Size() int { + return m.Size() +} +func (m *CounterSet) XXX_DiscardUnknown() { + xxx_messageInfo_CounterSet.DiscardUnknown(m) +} + +var xxx_messageInfo_CounterSet proto.InternalMessageInfo + +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{8} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{9} +} +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src) +} +func (m *DeviceAllocationConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo + +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{10} +} +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) +} +func (m *DeviceAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo + +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{11} +} +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAttribute) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttribute.Merge(m, src) +} +func (m *DeviceAttribute) XXX_Size() int { + return m.Size() +} +func (m *DeviceAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo + +func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} } +func (*DeviceCapacity) ProtoMessage() {} +func (*DeviceCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{12} +} +func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceCapacity.Merge(m, src) +} +func (m *DeviceCapacity) XXX_Size() int { + return m.Size() +} +func (m *DeviceCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo + +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{13} +} +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) +} +func (m *DeviceClaim) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo + +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func (*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{14} +} +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) +} +func (m *DeviceClaimConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo + +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{15} +} +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) +} +func (m *DeviceClass) XXX_Size() int { + 
return m.Size() +} +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo + +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) ProtoMessage() {} +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{16} +} +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) +} +func (m *DeviceClassConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo + +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{17} +} +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) +} +func (m *DeviceClassList) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo + +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } +func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{18} +} +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) +} +func (m *DeviceClassSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{19} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return 
m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) ProtoMessage() {} +func (*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{20} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} } +func (*DeviceCounterConsumption) ProtoMessage() {} +func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{21} +} +func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceCounterConsumption.Merge(m, src) +} +func (m *DeviceCounterConsumption) XXX_Size() int { + return m.Size() +} +func (m *DeviceCounterConsumption) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{22} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{23} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{24} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} } +func (*DeviceSubRequest) ProtoMessage() {} +func (*DeviceSubRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{25} +} +func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSubRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSubRequest.Merge(m, src) +} +func (m *DeviceSubRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceSubRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo + +func (m *DeviceTaint) Reset() { *m = DeviceTaint{} } +func (*DeviceTaint) ProtoMessage() {} +func (*DeviceTaint) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{26} +} +func (m *DeviceTaint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceTaint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceTaint.Merge(m, src) +} +func (m *DeviceTaint) XXX_Size() int { + return m.Size() +} +func (m *DeviceTaint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceTaint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo + +func (m *DeviceToleration) Reset() { *m = DeviceToleration{} } +func (*DeviceToleration) ProtoMessage() {} +func (*DeviceToleration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{27} +} +func (m *DeviceToleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceToleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceToleration.Merge(m, src) +} +func (m *DeviceToleration) XXX_Size() int { + return m.Size() 
+} +func (m *DeviceToleration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceToleration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo + +func (m *ExactDeviceRequest) Reset() { *m = ExactDeviceRequest{} } +func (*ExactDeviceRequest) ProtoMessage() {} +func (*ExactDeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{28} +} +func (m *ExactDeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExactDeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExactDeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExactDeviceRequest.Merge(m, src) +} +func (m *ExactDeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *ExactDeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExactDeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExactDeviceRequest proto.InternalMessageInfo + +func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} } +func (*NetworkDeviceData) ProtoMessage() {} +func (*NetworkDeviceData) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{29} +} +func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkDeviceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkDeviceData.Merge(m, src) +} +func (m *NetworkDeviceData) XXX_Size() int { + return m.Size() +} +func (m *NetworkDeviceData) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{30} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{31} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() 
int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{32} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{33} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{34} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{35} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{36} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{37} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{38} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{39} +} +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePool) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePool.Merge(m, src) +} +func (m *ResourcePool) XXX_Size() int { + return m.Size() +} +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo + +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{40} +} +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) +} +func (m *ResourceSlice) XXX_Size() int { + return m.Size() +} +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo + +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{41} +} +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) +} +func (m *ResourceSliceList) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo + +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f4fc532aec02d243, []int{42} +} +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) +} +func (m *ResourceSliceSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1.AllocatedDeviceStatus") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1.AllocationResult") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1.CELDeviceSelector") + proto.RegisterType((*CapacityRequestPolicy)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicy") + proto.RegisterType((*CapacityRequestPolicyRange)(nil), 
"k8s.io.api.resource.v1.CapacityRequestPolicyRange") + proto.RegisterType((*CapacityRequirements)(nil), "k8s.io.api.resource.v1.CapacityRequirements") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.CapacityRequirements.RequestsEntry") + proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1.Counter") + proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1.CounterSet") + proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.CounterSet.CountersEntry") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1.Device") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1.Device.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1.Device.CapacityEntry") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1.DeviceAttribute") + proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1.DeviceCapacity") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1.DeviceClassList") + proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1.DeviceConstraint") + proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption") + proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption.CountersEntry") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult.ConsumedCapacityEntry") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1.DeviceSelector") + proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1.DeviceSubRequest") + proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1.DeviceTaint") + proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1.DeviceToleration") + proto.RegisterType((*ExactDeviceRequest)(nil), "k8s.io.api.resource.v1.ExactDeviceRequest") + proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1.NetworkDeviceData") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1.OpaqueDeviceConfiguration") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1.ResourceClaimList") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimSpec") + 
proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1.ResourcePool") + proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1.ResourceSliceSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/resource/v1/generated.proto", fileDescriptor_f4fc532aec02d243) +} + +var fileDescriptor_f4fc532aec02d243 = []byte{ + // 3028 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0x4d, 0x6c, 0x24, 0x47, + 0xf5, 0x77, 0xcf, 0xcc, 0x8e, 0xc7, 0x6f, 0x6c, 0xaf, 0x5d, 0xbb, 0xeb, 0x4c, 0xfc, 0xff, 0xc7, + 0xe3, 0xf4, 0x92, 0xc4, 0x49, 0x76, 0xc7, 0x6b, 0x8b, 0x44, 0x51, 0x12, 0x10, 0x1e, 0xdb, 0x9b, + 0x38, 0xfb, 0x11, 0xa7, 0xc6, 0x6b, 0x36, 0x28, 0x84, 0xb4, 0x7b, 0xca, 0x76, 0xe3, 0x9e, 0xee, + 0x49, 0x77, 0x8d, 0x77, 0xcd, 0x29, 0xe2, 0x00, 0x57, 0x04, 0x12, 0x02, 0x24, 0x24, 0x94, 0x03, + 0x12, 0x17, 0x84, 0x38, 0x11, 0x04, 0x28, 0xc7, 0x08, 0x29, 0x28, 0x17, 0xa4, 0x20, 0xa1, 0x81, + 0x1d, 0x4e, 0x48, 0x08, 0x89, 0x0b, 0x07, 0x1f, 0x10, 0xaa, 0xea, 0xaa, 0xfe, 0x9a, 0x6e, 0x4f, + 0xdb, 0x59, 0xaf, 0x96, 0x9b, 0xe7, 0xd5, 0x7b, 0xbf, 0xaa, 0x7a, 0xf5, 0xbe, 0xea, 0x75, 0x19, + 0x9e, 0xdc, 0x7b, 0xc1, 0xad, 0x19, 0xf6, 0xbc, 0xd6, 0x36, 0xe6, 0x1d, 0xe2, 0xda, 0x1d, 0x47, + 0x27, 0xf3, 0xfb, 0x0b, 0xf3, 0x3b, 0xc4, 0x22, 0x8e, 0x46, 0x49, 0xb3, 0xd6, 0x76, 0x6c, 0x6a, + 0xa3, 0x29, 0x8f, 0xaf, 0xa6, 0xb5, 0x8d, 0x9a, 0xe4, 0xab, 0xed, 0x2f, 0x4c, 0x5f, 0xde, 0x31, + 0xe8, 0x6e, 0x67, 0xab, 0xa6, 0xdb, 0xad, 0xf9, 0x1d, 0x7b, 0xc7, 0x9e, 0xe7, 0xec, 0x5b, 0x9d, + 0x6d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x79, 0x30, 0xd3, 0x6a, 0x68, 0x3a, 0xdd, 0x76, 0x92, 0xa6, + 0x9a, 0xfe, 0x7c, 0xc0, 0xd3, 0xd2, 0xf4, 0x5d, 0xc3, 0x22, 0xce, 0xc1, 0x7c, 0x7b, 0x6f, 0x27, + 0xba, 0xc6, 0xe3, 0x48, 0xb9, 0xf3, 0x2d, 0x42, 0xb5, 0xa4, 0xb9, 0xe6, 0xd3, 0xa4, 0x9c, 0x8e, + 0x45, 0x8d, 0x56, 0xff, 0x34, 0xcf, 0x0f, 0x12, 0x70, 0xf5, 0x5d, 0xd2, 0xd2, 0xe2, 0x72, 0xea, + 0x87, 0x79, 0xb8, 0xb0, 0x64, 0x9a, 0xb6, 0xce, 0x68, 0x2b, 0x64, 0xdf, 0xd0, 0x49, 0x83, 0x6a, + 0xb4, 0xe3, 0xa2, 0x27, 0xa1, 0xd8, 0x74, 0x8c, 0x7d, 0xe2, 0x54, 0x94, 0x59, 0x65, 0x6e, 0xa4, + 0x3e, 0xfe, 0x51, 0xb7, 0x3a, 0xd4, 0xeb, 0x56, 0x8b, 0x2b, 0x9c, 0x8a, 0xc5, 0x28, 0x9a, 0x85, + 0x42, 0xdb, 0xb6, 0xcd, 0x4a, 0x8e, 0x73, 0x8d, 0x0a, 0xae, 0xc2, 0xba, 0x6d, 0x9b, 0x98, 0x8f, + 0x70, 0x24, 0x8e, 0x5c, 0xc9, 0xc7, 0x90, 0x38, 0x15, 0x8b, 0x51, 0xf4, 0x04, 0x0c, 0xbb, 0xbb, + 0x9a, 0x43, 0xd6, 0x56, 0x2a, 0xc3, 0x9c, 0xb1, 0xdc, 0xeb, 0x56, 0x87, 0x1b, 0x1e, 0x09, 0xcb, + 0x31, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x34, 0xa8, 0x61, 0x5b, 0x6e, 0xa5, 0x30, 0x9b, 0x9f, 0x2b, + 0x2f, 0xce, 0xd7, 0x02, 0x3b, 0xf0, 0xf7, 0x5f, 0x6b, 0xef, 0xed, 0x30, 0x82, 0x5b, 0x63, 0x6a, + 0xae, 0xed, 0x2f, 0xd4, 0x96, 0xa5, 0x5c, 0x1d, 0x89, 0x35, 0x80, 0x4f, 0x72, 0x71, 0x08, 0x16, + 0x5d, 0x83, 0x42, 0x53, 0xa3, 0x5a, 0xe5, 0xcc, 0xac, 0x32, 0x57, 0x5e, 0xbc, 0x9c, 0x0a, 0x2f, + 0xd4, 0x5b, 0xc3, 0xda, 
0x9d, 0xd5, 0xbb, 0x94, 0x58, 0x2e, 0x03, 0x2f, 0x31, 0x05, 0xac, 0x68, + 0x54, 0xc3, 0x1c, 0x04, 0xbd, 0x05, 0x65, 0x8b, 0xd0, 0x3b, 0xb6, 0xb3, 0xc7, 0x88, 0x95, 0x22, + 0xc7, 0x7c, 0xba, 0x96, 0x6c, 0xba, 0xb5, 0x9b, 0x82, 0x95, 0x2b, 0x85, 0x09, 0xd4, 0xcf, 0xf6, + 0xba, 0xd5, 0xf2, 0xcd, 0x00, 0x01, 0x87, 0xe1, 0xd4, 0xdf, 0xe4, 0x60, 0x42, 0x1c, 0xa1, 0x61, + 0x5b, 0x98, 0xb8, 0x1d, 0x93, 0xa2, 0x37, 0x61, 0xd8, 0xd3, 0xaa, 0xcb, 0x8f, 0xaf, 0xbc, 0x58, + 0x4b, 0x9b, 0xce, 0x9b, 0x27, 0x0e, 0x50, 0x3f, 0x2b, 0x14, 0x34, 0xec, 0x8d, 0xbb, 0x58, 0xe2, + 0xa1, 0x4d, 0x18, 0xb5, 0xec, 0x26, 0x69, 0x10, 0x93, 0xe8, 0xd4, 0x76, 0xf8, 0xa1, 0x96, 0x17, + 0x67, 0xc3, 0xf8, 0xcc, 0x85, 0xf8, 0x56, 0x42, 0x7c, 0xf5, 0x89, 0x5e, 0xb7, 0x3a, 0x1a, 0xa6, + 0xe0, 0x08, 0x0e, 0xea, 0xc0, 0x39, 0xcd, 0x5f, 0xc5, 0x86, 0xd1, 0x22, 0x2e, 0xd5, 0x5a, 0x6d, + 0x71, 0x02, 0xcf, 0x64, 0x3b, 0x60, 0x26, 0x56, 0x7f, 0xa4, 0xd7, 0xad, 0x9e, 0x5b, 0xea, 0x87, + 0xc2, 0x49, 0xf8, 0xea, 0x2b, 0x30, 0xb9, 0xbc, 0x7a, 0x5d, 0x98, 0xbe, 0x5c, 0xcb, 0x22, 0x00, + 0xb9, 0xdb, 0x76, 0x88, 0xcb, 0xce, 0x53, 0x38, 0x80, 0x6f, 0x32, 0xab, 0xfe, 0x08, 0x0e, 0x71, + 0xa9, 0x1f, 0xe4, 0xe0, 0xc2, 0xb2, 0xd6, 0xd6, 0x74, 0x83, 0x1e, 0x60, 0xf2, 0x6e, 0x87, 0xb8, + 0x74, 0xdd, 0x36, 0x0d, 0xfd, 0x00, 0xdd, 0x62, 0x87, 0xb1, 0xad, 0x75, 0x4c, 0x9a, 0x70, 0x18, + 0x7d, 0xbb, 0x09, 0x4e, 0xe7, 0x8d, 0x8e, 0x66, 0x51, 0x83, 0x1e, 0x78, 0x8e, 0xb0, 0xe2, 0x41, + 0x60, 0x89, 0x85, 0x08, 0x94, 0xf7, 0x35, 0xd3, 0x68, 0x6e, 0x6a, 0x66, 0x87, 0xb8, 0x95, 0x3c, + 0xf7, 0x84, 0xe3, 0x42, 0x9f, 0x13, 0xbb, 0x2a, 0x6f, 0x06, 0x50, 0x38, 0x8c, 0x8b, 0xb6, 0x00, + 0xf8, 0x4f, 0xac, 0x59, 0x3b, 0xa4, 0x52, 0xe0, 0x1b, 0x58, 0x4c, 0xb3, 0xa6, 0x44, 0x05, 0x70, + 0xc9, 0xfa, 0x38, 0xd3, 0xdd, 0xa6, 0x8f, 0x84, 0x43, 0xa8, 0xea, 0x7b, 0x39, 0x98, 0x4e, 0x17, + 0x45, 0x6b, 0x90, 0x6f, 0x19, 0xd6, 0x09, 0x95, 0x37, 0xdc, 0xeb, 0x56, 0xf3, 0x37, 0x0c, 0x0b, + 0x33, 0x0c, 0x0e, 0xa5, 0xdd, 0xe5, 0xd1, 0xea, 0xa4, 0x50, 0xda, 0x5d, 0xcc, 0x30, 0xd0, 0x75, + 0x28, 0xb8, 0x94, 0xb4, 0x85, 0x03, 0x1c, 0x17, 0x8b, 0x07, 0x89, 0x06, 0x25, 0x6d, 0xcc, 0x51, + 0xd4, 0xff, 0x28, 0x70, 0x3e, 0xac, 0x02, 0xc3, 0x21, 0x2d, 0x62, 0x51, 0x17, 0x1d, 0x40, 0xc9, + 0xf1, 0x54, 0xc2, 0x7c, 0x99, 0x9d, 0xf1, 0x8b, 0x59, 0xb4, 0x2f, 0xe5, 0x6b, 0x42, 0x9f, 0xee, + 0xaa, 0x45, 0x9d, 0x83, 0xfa, 0xe3, 0xe2, 0xbc, 0x4b, 0x92, 0xfc, 0xcd, 0xbf, 0x54, 0xc7, 0xde, + 0xe8, 0x68, 0xa6, 0xb1, 0x6d, 0x90, 0xe6, 0x4d, 0xad, 0x45, 0xb0, 0x3f, 0xdd, 0xf4, 0x1e, 0x8c, + 0x45, 0xa4, 0xd1, 0x04, 0xe4, 0xf7, 0xc8, 0x81, 0xe7, 0x10, 0x98, 0xfd, 0x89, 0x56, 0xe0, 0xcc, + 0x3e, 0xb3, 0x93, 0x93, 0x69, 0x14, 0x7b, 0xc2, 0x2f, 0xe6, 0x5e, 0x50, 0xd4, 0xb7, 0x61, 0x78, + 0xd9, 0xee, 0x58, 0x94, 0x38, 0xa8, 0x21, 0x41, 0x4f, 0x76, 0xe2, 0x63, 0x62, 0x8f, 0x67, 0xb8, + 0x05, 0x8b, 0x39, 0xd4, 0x7f, 0x28, 0x00, 0x62, 0x82, 0x06, 0xa1, 0x2c, 0x6f, 0x59, 0x5a, 0x8b, + 0x08, 0xe7, 0xf6, 0xf3, 0x16, 0xd7, 0x00, 0x1f, 0x41, 0x6f, 0x43, 0x49, 0xf7, 0xf8, 0xdd, 0x4a, + 0x8e, 0x2b, 0xfe, 0x4a, 0xaa, 0xe2, 0x7d, 0x5c, 0xf9, 0xa7, 0x50, 0xf7, 0x84, 0x54, 0xb7, 0x24, + 0x63, 0x1f, 0x73, 0xfa, 0x2d, 0x18, 0x8b, 0x30, 0x27, 0x68, 0xf7, 0xb9, 0xa8, 0x76, 0xab, 0x03, + 0xe6, 0x0f, 0xab, 0xf3, 0xdf, 0x25, 0x10, 0x09, 0x36, 0xc3, 0x56, 0x5d, 0x00, 0x8d, 0x52, 0xc7, + 0xd8, 0xea, 0x50, 0x22, 0x37, 0x3b, 0x20, 0x63, 0xd4, 0x96, 0x7c, 0x01, 0x6f, 0xab, 0x17, 0x65, + 0x7c, 0x0c, 0x06, 0xfa, 0x6d, 0x2b, 0x34, 0x0d, 0xda, 0x83, 0x92, 0x2e, 0x0c, 0x56, 0x04, 0xaf, + 0x4b, 0x03, 0xa6, 0x94, 0xf6, 0x1d, 0x33, 0x65, 
0x49, 0x4e, 0x30, 0x65, 0x39, 0x01, 0xda, 0x87, + 0x09, 0xdd, 0xb6, 0xdc, 0x4e, 0x8b, 0xb8, 0x52, 0xe9, 0xa2, 0x76, 0xb8, 0x72, 0xf4, 0xa4, 0x82, + 0x7b, 0x99, 0x0b, 0xb7, 0x79, 0xf1, 0x50, 0x11, 0x13, 0x4f, 0x2c, 0xc7, 0x10, 0x71, 0xdf, 0x1c, + 0x68, 0x0e, 0x4a, 0x2c, 0xcb, 0xb1, 0xd5, 0xf0, 0x54, 0x36, 0x52, 0x1f, 0x65, 0x4b, 0xbe, 0x29, + 0x68, 0xd8, 0x1f, 0xed, 0xcb, 0xab, 0xc5, 0xfb, 0x94, 0x57, 0xe7, 0xa0, 0xa4, 0x99, 0x26, 0x63, + 0x70, 0x79, 0x5d, 0x55, 0xf2, 0x56, 0xb0, 0x24, 0x68, 0xd8, 0x1f, 0x45, 0xd7, 0xa0, 0x48, 0x35, + 0xc3, 0xa2, 0x6e, 0xa5, 0xc4, 0x35, 0x73, 0xf1, 0x68, 0xcd, 0x6c, 0x30, 0xde, 0xa0, 0x9a, 0xe3, + 0x3f, 0x5d, 0x2c, 0x20, 0xd0, 0x02, 0x94, 0xb7, 0x0c, 0xab, 0xe9, 0x6e, 0xd8, 0x0c, 0xbc, 0x32, + 0xc2, 0x67, 0xe6, 0x95, 0x4c, 0x3d, 0x20, 0xe3, 0x30, 0x0f, 0x5a, 0x86, 0x49, 0xf6, 0xd3, 0xb0, + 0x76, 0x82, 0xaa, 0xac, 0x02, 0xb3, 0xf9, 0xb9, 0x91, 0xfa, 0x85, 0x5e, 0xb7, 0x3a, 0x59, 0x8f, + 0x0f, 0xe2, 0x7e, 0x7e, 0x74, 0x1b, 0x2a, 0x82, 0x78, 0x55, 0x33, 0xcc, 0x8e, 0x43, 0x42, 0x58, + 0x65, 0x8e, 0xf5, 0xff, 0xbd, 0x6e, 0xb5, 0x52, 0x4f, 0xe1, 0xc1, 0xa9, 0xd2, 0x0c, 0x99, 0x15, + 0x10, 0x77, 0x6e, 0x74, 0x4c, 0x6a, 0xb4, 0xcd, 0x50, 0xcd, 0xe4, 0x56, 0x46, 0xf9, 0xf6, 0x38, + 0xf2, 0x52, 0x0a, 0x0f, 0x4e, 0x95, 0x9e, 0xde, 0x86, 0xb3, 0x31, 0x6f, 0x4a, 0x88, 0x05, 0x5f, + 0x88, 0xc6, 0x82, 0xa7, 0x06, 0x14, 0x74, 0x12, 0x2f, 0x14, 0x13, 0xa6, 0x75, 0x18, 0x8b, 0xb8, + 0x50, 0xc2, 0x2c, 0x2f, 0x47, 0x67, 0x79, 0x72, 0x80, 0x73, 0xc8, 0x84, 0x13, 0x0a, 0x3c, 0xdf, + 0xce, 0xc1, 0x63, 0xf1, 0xa2, 0x72, 0xd9, 0xb6, 0xb6, 0x8d, 0x9d, 0x8e, 0xc3, 0x7f, 0xa0, 0x2f, + 0x41, 0xd1, 0x03, 0x12, 0x11, 0x69, 0x4e, 0x9a, 0x50, 0x83, 0x53, 0x0f, 0xbb, 0xd5, 0xa9, 0xb8, + 0xa8, 0x37, 0x82, 0x85, 0x1c, 0xb3, 0x69, 0x3f, 0x27, 0xe6, 0xf8, 0xa1, 0x8e, 0x86, 0x73, 0x5a, + 0x90, 0xc2, 0xd0, 0x37, 0xe0, 0x5c, 0x53, 0xf8, 0x71, 0x68, 0x09, 0x22, 0x67, 0x3f, 0x3b, 0xc8, + 0xf5, 0x43, 0x22, 0xf5, 0xff, 0x13, 0xab, 0x3c, 0x97, 0x30, 0x88, 0x93, 0x26, 0x51, 0xff, 0xa4, + 0xc0, 0x54, 0x72, 0x79, 0x8d, 0xde, 0x81, 0x61, 0x87, 0xff, 0x25, 0x73, 0xfa, 0x73, 0x47, 0x2f, + 0x45, 0xec, 0x2c, 0xbd, 0x4c, 0xf7, 0x7e, 0xbb, 0x58, 0xc2, 0xa2, 0xaf, 0x42, 0x51, 0xe7, 0xab, + 0x11, 0xe1, 0xfc, 0xb9, 0xac, 0x17, 0x80, 0xe8, 0xae, 0x7d, 0xf7, 0xf6, 0xc8, 0x58, 0x80, 0xaa, + 0x3f, 0x53, 0xe0, 0x6c, 0xcc, 0xd2, 0xd0, 0x0c, 0xe4, 0x0d, 0x8b, 0x72, 0xcb, 0xc9, 0x7b, 0x07, + 0xb2, 0x66, 0x51, 0x2f, 0x07, 0xb3, 0x01, 0xf4, 0x38, 0x14, 0xb6, 0xd8, 0x55, 0x31, 0xcf, 0x9d, + 0x65, 0xac, 0xd7, 0xad, 0x8e, 0xd4, 0x6d, 0xdb, 0xf4, 0x38, 0xf8, 0x10, 0x7a, 0x0a, 0x8a, 0x2e, + 0x75, 0x0c, 0x6b, 0x87, 0x17, 0x9a, 0x23, 0x5e, 0xc0, 0x68, 0x70, 0x8a, 0xc7, 0x26, 0x86, 0xd1, + 0x33, 0x30, 0xbc, 0x4f, 0x1c, 0x5e, 0x9e, 0x7b, 0x61, 0x95, 0x87, 0xc1, 0x4d, 0x8f, 0xe4, 0xb1, + 0x4a, 0x06, 0xf5, 0x63, 0x05, 0xc6, 0xa3, 0xf6, 0x7a, 0x2a, 0x15, 0x06, 0xda, 0x86, 0x31, 0x27, + 0x5c, 0xbc, 0x0a, 0x1f, 0xba, 0x7c, 0xac, 0x62, 0xb9, 0x3e, 0xd9, 0xeb, 0x56, 0xc7, 0xa2, 0x45, + 0x70, 0x14, 0x56, 0xfd, 0x71, 0x0e, 0xca, 0x62, 0x3f, 0xa6, 0x66, 0xb4, 0x50, 0xa3, 0xaf, 0x42, + 0x7c, 0x22, 0x93, 0x35, 0x05, 0xd5, 0x49, 0x82, 0xe3, 0x7c, 0x0d, 0xca, 0x2c, 0x99, 0x51, 0xc7, + 0xcb, 0x08, 0x9e, 0x11, 0xcd, 0x0d, 0x74, 0x18, 0x21, 0x10, 0xdc, 0x2b, 0x02, 0x9a, 0x8b, 0xc3, + 0x88, 0xe8, 0xb6, 0x6f, 0xa0, 0xf9, 0x4c, 0x79, 0x98, 0x6d, 0x35, 0x9b, 0x6d, 0x7e, 0xa8, 0x40, + 0x25, 0x4d, 0x28, 0x12, 0x3a, 0x94, 0x93, 0x84, 0x8e, 0xdc, 0x83, 0x08, 0x1d, 0xbf, 0x56, 0x42, + 0x47, 0xec, 0xba, 0xe8, 0x1d, 0x28, 0xb1, 0x3b, 0x2e, 0xef, 0x49, 0x78, 
0x26, 0x7b, 0x25, 0xdb, + 0x8d, 0xf8, 0xf5, 0xad, 0xaf, 0x13, 0x9d, 0xde, 0x20, 0x54, 0x0b, 0x2e, 0xb0, 0x01, 0x0d, 0xfb, + 0xa8, 0x68, 0x0d, 0x0a, 0x6e, 0x9b, 0xe8, 0xd9, 0xb2, 0x0b, 0x5f, 0x54, 0xa3, 0x4d, 0xf4, 0xa0, + 0x9a, 0x64, 0xbf, 0x30, 0x87, 0x50, 0xbf, 0x1f, 0xd6, 0xbf, 0xeb, 0x46, 0xf5, 0x9f, 0xa2, 0x55, + 0xe5, 0x41, 0x68, 0xf5, 0x03, 0x3f, 0x68, 0xf1, 0x85, 0x5d, 0x37, 0x5c, 0x8a, 0xde, 0xea, 0xd3, + 0x6c, 0x2d, 0x9b, 0x66, 0x99, 0x34, 0xd7, 0xab, 0xef, 0x45, 0x92, 0x12, 0xd2, 0xea, 0xab, 0x70, + 0xc6, 0xa0, 0xa4, 0x25, 0xfd, 0xe7, 0x62, 0x06, 0xb5, 0x06, 0xc1, 0x65, 0x8d, 0x49, 0x62, 0x0f, + 0x40, 0xfd, 0x6e, 0x2e, 0xb2, 0x76, 0xa6, 0x6e, 0xf4, 0x65, 0x18, 0x71, 0x45, 0x99, 0x27, 0x3d, + 0x7f, 0x40, 0xc2, 0xf6, 0xab, 0xc6, 0x49, 0x31, 0xc9, 0x88, 0xa4, 0xb8, 0x38, 0xc0, 0x0a, 0xf9, + 0x66, 0x2e, 0xa3, 0x6f, 0xc6, 0x8e, 0x39, 0xcd, 0x37, 0xd1, 0x75, 0x38, 0x4f, 0xee, 0x52, 0x62, + 0x35, 0x49, 0x13, 0x0b, 0x1c, 0x5e, 0x1b, 0x7b, 0xe1, 0xbe, 0xd2, 0xeb, 0x56, 0xcf, 0xaf, 0x26, + 0x8c, 0xe3, 0x44, 0x29, 0xd5, 0x84, 0xa4, 0xc3, 0x47, 0xb7, 0xa0, 0x68, 0xb7, 0xb5, 0x77, 0xfd, + 0xf0, 0xbe, 0x90, 0xb6, 0xfc, 0xd7, 0x39, 0x57, 0x92, 0x71, 0x01, 0x5b, 0xbb, 0x37, 0x8c, 0x05, + 0x98, 0xfa, 0x77, 0x05, 0x26, 0xe2, 0x81, 0xee, 0x18, 0xf1, 0x64, 0x1d, 0xc6, 0x5b, 0x1a, 0xd5, + 0x77, 0xfd, 0x84, 0x29, 0x7a, 0xa6, 0x73, 0xbd, 0x6e, 0x75, 0xfc, 0x46, 0x64, 0xe4, 0xb0, 0x5b, + 0x45, 0x57, 0x3b, 0xa6, 0x79, 0x10, 0xbd, 0xce, 0xc4, 0xe4, 0xd1, 0x9b, 0x30, 0xd9, 0x34, 0x5c, + 0x6a, 0x58, 0x3a, 0x0d, 0x40, 0xbd, 0x26, 0xeb, 0xb3, 0xac, 0x60, 0x5e, 0x89, 0x0f, 0xa6, 0xe0, + 0xf6, 0xa3, 0xa8, 0x3f, 0xca, 0xf9, 0x3e, 0xdc, 0x77, 0x01, 0x42, 0x8b, 0x00, 0xba, 0x7f, 0xe3, + 0x8d, 0xb7, 0xc7, 0x82, 0xbb, 0x30, 0x0e, 0x71, 0x21, 0xb3, 0xef, 0x36, 0xfd, 0xc5, 0xe3, 0x5e, + 0xbc, 0x1e, 0x9a, 0xbb, 0xf5, 0x3f, 0x15, 0x18, 0x8b, 0x64, 0xd2, 0x0c, 0x57, 0xec, 0x37, 0x60, + 0x98, 0xdc, 0xd5, 0x74, 0x6a, 0xca, 0xb2, 0xe0, 0x99, 0xb4, 0x09, 0x57, 0x19, 0x5b, 0x34, 0x51, + 0xf3, 0x06, 0xe0, 0xaa, 0x27, 0x8e, 0x25, 0x0e, 0xda, 0x85, 0xf1, 0x6d, 0xc3, 0x71, 0xe9, 0xd2, + 0xbe, 0x66, 0x98, 0xda, 0x96, 0x49, 0x44, 0x26, 0x1d, 0x90, 0xa5, 0x1b, 0x9d, 0x2d, 0x89, 0x3b, + 0x25, 0x16, 0x3a, 0x7e, 0x35, 0x82, 0x83, 0x63, 0xb8, 0xea, 0x1f, 0x8b, 0xb2, 0xa6, 0x4f, 0x29, + 0x44, 0xd1, 0xd3, 0xac, 0xa0, 0xe5, 0x43, 0x42, 0x07, 0xa1, 0xca, 0x94, 0x93, 0xb1, 0x1c, 0x0f, + 0x7d, 0x59, 0xc8, 0x65, 0xfa, 0xb2, 0x90, 0xcf, 0xf0, 0x65, 0xa1, 0x70, 0xe4, 0x97, 0x85, 0x05, + 0x28, 0x6b, 0xcd, 0x96, 0x61, 0x2d, 0xe9, 0x3a, 0x71, 0x5d, 0x5e, 0x30, 0x8a, 0xbb, 0xe8, 0x52, + 0x40, 0xc6, 0x61, 0x1e, 0x56, 0xfe, 0x50, 0xdb, 0x24, 0x8e, 0xb8, 0xdf, 0x15, 0xb3, 0x28, 0x76, + 0xc3, 0x17, 0x08, 0xca, 0x9f, 0x80, 0xe6, 0xe2, 0x30, 0x62, 0xf2, 0x65, 0x77, 0xf8, 0x3e, 0x5e, + 0x76, 0x4b, 0x9f, 0xe9, 0xb2, 0xfb, 0x5a, 0xf0, 0x31, 0x66, 0x84, 0xeb, 0xf6, 0x4a, 0xe8, 0x63, + 0xcc, 0x61, 0xb7, 0xfa, 0x78, 0xda, 0x07, 0x27, 0x7a, 0xd0, 0x26, 0x6e, 0xed, 0x56, 0xf8, 0x8b, + 0xcd, 0xfb, 0x8a, 0xdf, 0x7c, 0x69, 0xca, 0x9a, 0x97, 0xdf, 0xeb, 0xcb, 0x8b, 0xd7, 0x4e, 0x74, + 0xed, 0xa9, 0x2d, 0xc7, 0xd0, 0xbc, 0x80, 0xf0, 0x74, 0xac, 0x2f, 0xd3, 0x4c, 0x6f, 0x0c, 0xf5, + 0xad, 0x67, 0xda, 0x85, 0x0b, 0x89, 0xa8, 0xa7, 0xda, 0xf3, 0xdc, 0x94, 0x17, 0x13, 0xbf, 0x5b, + 0xb3, 0x02, 0x79, 0x9d, 0x98, 0x22, 0x6f, 0xa5, 0x7e, 0x23, 0xea, 0xfb, 0x62, 0xe1, 0xb5, 0xa6, + 0x97, 0x57, 0xaf, 0x63, 0x26, 0xae, 0x7e, 0xab, 0x20, 0x33, 0x55, 0xe0, 0xec, 0x19, 0x62, 0xd4, + 0x12, 0x9c, 0x6d, 0x06, 0x09, 0x9d, 0xe7, 0x65, 0xcf, 0x45, 0x1f, 0x11, 0xcc, 0xe1, 0x0a, 0x84, + 
0xcb, 0xc5, 0xf9, 0xa3, 0x25, 0x49, 0xfe, 0x3e, 0x96, 0x24, 0x9b, 0x30, 0x1e, 0x7c, 0xbe, 0xb9, + 0x61, 0x37, 0xa5, 0xcf, 0xd7, 0x64, 0x08, 0x5b, 0x8a, 0x8c, 0x1e, 0x76, 0xab, 0xe7, 0xe3, 0x37, + 0x5b, 0x46, 0xc7, 0x31, 0x14, 0x74, 0x11, 0xce, 0xf0, 0xac, 0xc1, 0xa3, 0x42, 0x3e, 0x28, 0xbe, + 0x78, 0xd8, 0xc7, 0xde, 0xd8, 0xe9, 0x47, 0x83, 0xcd, 0x50, 0x2f, 0x74, 0x98, 0x9f, 0xfd, 0xa5, + 0xe3, 0x34, 0xf9, 0xbd, 0x9a, 0xc3, 0x1f, 0xf1, 0xb1, 0xd4, 0x7f, 0xf9, 0xf7, 0x08, 0xde, 0x9e, + 0x43, 0x8f, 0x85, 0x8c, 0xb9, 0x5e, 0x16, 0xcb, 0xca, 0x5f, 0x23, 0x07, 0x9e, 0x65, 0x5f, 0x0c, + 0x5b, 0xf6, 0x48, 0xca, 0x35, 0xf7, 0x25, 0x28, 0x92, 0xed, 0x6d, 0xa2, 0x53, 0x11, 0x99, 0x65, + 0xe3, 0xb7, 0xb8, 0xca, 0xa9, 0x87, 0xac, 0xf0, 0x08, 0xa6, 0xf4, 0x88, 0x58, 0x88, 0x30, 0xfb, + 0xa0, 0x46, 0x8b, 0x2c, 0x35, 0x9b, 0xa4, 0x29, 0x3e, 0x26, 0x1d, 0xe7, 0xdb, 0x1e, 0x6f, 0x1a, + 0x6c, 0x48, 0x00, 0x1c, 0x60, 0xbd, 0x58, 0xfa, 0xc1, 0x4f, 0xaa, 0x43, 0xef, 0xfd, 0x79, 0x76, + 0x48, 0x7d, 0x3f, 0x27, 0x8d, 0x3f, 0x50, 0xf7, 0xa0, 0x8d, 0xbf, 0x0a, 0x25, 0xbb, 0xcd, 0x78, + 0x6d, 0x99, 0x95, 0x2e, 0xc9, 0xea, 0xe2, 0x75, 0x41, 0x3f, 0xec, 0x56, 0x2b, 0x71, 0x58, 0x39, + 0x86, 0x7d, 0xe9, 0x40, 0x85, 0xf9, 0x4c, 0x2a, 0x2c, 0x1c, 0x5f, 0x85, 0xcb, 0x30, 0x19, 0x98, + 0x4e, 0x83, 0xe8, 0xb6, 0xd5, 0x74, 0x85, 0xf5, 0xf2, 0xcc, 0xb1, 0x11, 0x1f, 0xc4, 0xfd, 0xfc, + 0xea, 0x0f, 0x0b, 0x80, 0xfa, 0x0b, 0x8d, 0xa4, 0x08, 0xa0, 0x7c, 0x96, 0x08, 0x90, 0x3b, 0xd5, + 0x08, 0x90, 0xbf, 0xbf, 0x11, 0xa0, 0x70, 0x44, 0x04, 0x78, 0x18, 0x4b, 0x88, 0xd3, 0x0a, 0x1a, + 0x3f, 0x57, 0x60, 0xb2, 0xef, 0x15, 0x02, 0x7a, 0x09, 0xc6, 0x0c, 0x56, 0x08, 0x6f, 0x6b, 0xe2, + 0xca, 0xe6, 0x19, 0xc6, 0x05, 0xb1, 0xcc, 0xb1, 0xb5, 0xf0, 0x20, 0x8e, 0xf2, 0xa2, 0x47, 0x21, + 0x6f, 0xb4, 0x65, 0xaf, 0x96, 0xe7, 0xaa, 0xb5, 0x75, 0x17, 0x33, 0x1a, 0x33, 0xb9, 0x5d, 0xcd, + 0x69, 0xde, 0xd1, 0x1c, 0xe6, 0xc9, 0x0e, 0xd3, 0x6e, 0x3e, 0x6a, 0x72, 0xaf, 0x46, 0x87, 0x71, + 0x9c, 0x5f, 0xfd, 0xa9, 0x02, 0x8f, 0xa6, 0x5e, 0xe5, 0x32, 0xbf, 0x64, 0xd1, 0x00, 0xda, 0x9a, + 0xa3, 0xb5, 0x88, 0xb8, 0xa3, 0x9c, 0xe0, 0xe5, 0x87, 0x7f, 0x09, 0x5a, 0xf7, 0x81, 0x70, 0x08, + 0x54, 0xfd, 0x5e, 0x0e, 0xc6, 0xe4, 0x05, 0xd6, 0xeb, 0xdd, 0x9d, 0x7e, 0x63, 0xe7, 0x5a, 0xa4, + 0xb1, 0x93, 0x5a, 0x52, 0x44, 0x96, 0x95, 0xd6, 0xda, 0x41, 0x0d, 0x28, 0xba, 0xfc, 0x7d, 0xd0, + 0xa0, 0x0e, 0x7a, 0x14, 0x8e, 0x8b, 0x04, 0x8a, 0xf7, 0x7e, 0x63, 0x01, 0xa5, 0xf6, 0x14, 0x98, + 0x89, 0xf0, 0x8b, 0x42, 0xcc, 0xc1, 0x64, 0x9b, 0x38, 0xc4, 0xd2, 0x09, 0xba, 0x04, 0x25, 0xad, + 0x6d, 0xbc, 0xe2, 0xd8, 0x9d, 0xb6, 0x38, 0x45, 0xff, 0xf6, 0xb7, 0xb4, 0xbe, 0xc6, 0xe9, 0xd8, + 0xe7, 0x60, 0xdc, 0x72, 0x2d, 0xc2, 0x96, 0x42, 0x9d, 0x4e, 0x8f, 0x8e, 0x7d, 0x0e, 0xbf, 0x2e, + 0x2a, 0xa4, 0xd6, 0x45, 0x75, 0xc8, 0x77, 0x8c, 0xa6, 0x68, 0x34, 0x5f, 0x91, 0xc9, 0xe3, 0x56, + 0xd6, 0x42, 0x98, 0x09, 0xab, 0xbf, 0x55, 0x60, 0x32, 0xb2, 0xc9, 0x07, 0xd0, 0x7d, 0x7a, 0x2d, + 0xda, 0x7d, 0x7a, 0x22, 0xd3, 0x61, 0xa5, 0xf4, 0x9f, 0xf4, 0xd8, 0xf2, 0x79, 0x03, 0xea, 0x66, + 0xfc, 0x99, 0xd1, 0xc5, 0x0c, 0x4d, 0xdc, 0xf4, 0xb7, 0x45, 0xea, 0xaf, 0x72, 0x70, 0x2e, 0xc1, + 0x72, 0xd0, 0x6d, 0x80, 0x20, 0x68, 0x8b, 0xa9, 0x52, 0x23, 0x69, 0xdf, 0x47, 0x12, 0xfe, 0xf2, + 0x24, 0x44, 0x0d, 0x61, 0xa1, 0x16, 0x94, 0x1d, 0xe2, 0x12, 0x67, 0x9f, 0x34, 0xaf, 0xf2, 0xdc, + 0xcf, 0x14, 0xf5, 0x7c, 0x26, 0x45, 0xf5, 0x59, 0x69, 0x10, 0xb2, 0x71, 0x00, 0x89, 0xc3, 0xf8, + 0xe8, 0x76, 0xa0, 0x30, 0xef, 0xeb, 0xf3, 0xe5, 0x01, 0xbb, 0x88, 0xbe, 0xca, 0x3b, 0x42, 0x75, + 0x7f, 0x50, 0xe0, 0x42, 
0x64, 0x79, 0x1b, 0xa4, 0xd5, 0x36, 0x35, 0x4a, 0x1e, 0x40, 0x88, 0x69, + 0x44, 0x42, 0xcc, 0x42, 0x26, 0xed, 0xc9, 0xe5, 0xa5, 0x76, 0x91, 0x3f, 0x56, 0xe0, 0xd1, 0x44, + 0x89, 0x07, 0xe0, 0x38, 0x38, 0xea, 0x38, 0x97, 0x8f, 0xb5, 0xa3, 0x14, 0x07, 0xfa, 0x7d, 0xda, + 0x7e, 0xb8, 0x27, 0xfd, 0x6f, 0xe5, 0x01, 0xf5, 0x17, 0x0a, 0x8c, 0x4a, 0xce, 0x75, 0xdb, 0x36, + 0x33, 0x5c, 0x2e, 0x17, 0x01, 0xc4, 0xeb, 0x53, 0xf9, 0x15, 0x25, 0x1f, 0xac, 0xf8, 0x15, 0x7f, + 0x04, 0x87, 0xb8, 0xd0, 0x6b, 0x80, 0xe4, 0xda, 0x1a, 0xa6, 0xec, 0x09, 0xf2, 0x90, 0x9e, 0xaf, + 0x4f, 0x0b, 0x59, 0x84, 0xfb, 0x38, 0x70, 0x82, 0x94, 0xfa, 0x3b, 0x25, 0xc8, 0xbd, 0x9c, 0xfc, + 0xf0, 0xe9, 0x9c, 0x2f, 0x2b, 0x55, 0xe7, 0xe1, 0x0c, 0xc2, 0x39, 0x1f, 0xc2, 0x0c, 0xc2, 0xd7, + 0x95, 0xe2, 0x00, 0xbf, 0x2c, 0xc4, 0xd6, 0xcf, 0x0d, 0x3f, 0x6b, 0x75, 0x76, 0x35, 0xf4, 0xce, + 0xb8, 0xbc, 0xf8, 0xb9, 0x41, 0x0b, 0x61, 0x46, 0x99, 0xd8, 0x33, 0x0c, 0x3f, 0xc8, 0xc9, 0x1f, + 0xeb, 0x41, 0x4e, 0xe1, 0x14, 0x1e, 0xe4, 0x9c, 0x39, 0xf2, 0x41, 0xce, 0x5a, 0x90, 0x2d, 0xbc, + 0xdb, 0xc3, 0xcc, 0xd1, 0xe9, 0xf5, 0x88, 0x57, 0xbb, 0x18, 0xa6, 0xda, 0xc4, 0xf1, 0xc8, 0xc1, + 0xda, 0x98, 0x27, 0x7a, 0x6f, 0x82, 0xa6, 0x7b, 0xdd, 0xea, 0xd4, 0x7a, 0x22, 0x07, 0x4e, 0x91, + 0x44, 0x5b, 0x30, 0xce, 0x5b, 0x7c, 0x4d, 0xff, 0x45, 0x95, 0xf7, 0x6e, 0x48, 0x1d, 0xfc, 0x4c, + 0x2e, 0xe8, 0x3c, 0x37, 0x22, 0x08, 0x38, 0x86, 0x58, 0x7f, 0xf9, 0xa3, 0x7b, 0x33, 0x43, 0x9f, + 0xdc, 0x9b, 0x19, 0xfa, 0xf4, 0xde, 0xcc, 0xd0, 0x7b, 0xbd, 0x19, 0xe5, 0xa3, 0xde, 0x8c, 0xf2, + 0x49, 0x6f, 0x46, 0xf9, 0xb4, 0x37, 0xa3, 0xfc, 0xb5, 0x37, 0xa3, 0x7c, 0xe7, 0x6f, 0x33, 0x43, + 0x5f, 0x99, 0x4a, 0xfe, 0x77, 0x81, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x0c, 0xec, 0x16, + 0x47, 0x30, 0x00, 0x00, +} + +func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ShareID != nil { + i -= len(*m.ShareID) + copy(dAtA[i:], *m.ShareID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID))) + i-- + dAtA[i] = 0x3a + } + if m.NetworkData != nil { + { + size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Data != nil { + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x1a + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllocationTimestamp != nil { + { + size, err := m.AllocationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CapacityRequestPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapacityRequestPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CapacityRequestPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValidRange != nil { + { + size, err := m.ValidRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ValidValues) > 0 { + for iNdEx := len(m.ValidValues) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CapacityRequestPolicyRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapacityRequestPolicyRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CapacityRequestPolicyRange) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Step != nil { + { + size, err := m.Step.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Max != nil { + { + size, err := m.Max.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Min != nil { + { + size, err := m.Min.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CapacityRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapacityRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CapacityRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[QualifiedName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Counter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CounterSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CounterSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Counters) > 0 { + keysForCounters := make([]string, 0, len(m.Counters)) + for k := range m.Counters { + keysForCounters = append(keysForCounters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.Counters[string(keysForCounters[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCounters[iNdEx]) + copy(dAtA[i:], keysForCounters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllowMultipleAllocations != nil { + i-- + if *m.AllowMultipleAllocations { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if len(m.BindingFailureConditions) > 0 { + for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingFailureConditions[iNdEx]) + copy(dAtA[i:], m.BindingFailureConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.BindingConditions) > 0 { + for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingConditions[iNdEx]) + copy(dAtA[i:], m.BindingConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if m.BindsToNode != nil { + i-- + if *m.BindsToNode { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.AllNodes != nil { + i-- + if *m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x2a + } + if len(m.ConsumesCounters) > 0 { + for iNdEx := len(m.ConsumesCounters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConsumesCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VersionValue != nil { + i -= len(*m.VersionValue) + copy(dAtA[i:], *m.VersionValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) + i-- + dAtA[i] = 0x2a + } + if m.StringValue != nil { + i -= len(*m.StringValue) + copy(dAtA[i:], *m.StringValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) + i-- + dAtA[i] = 0x22 + } + if m.BoolValue != nil { + i-- + if *m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *DeviceCapacity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceCapacity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RequestPolicy != nil { + { + size, err := m.RequestPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassSpec) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExtendedResourceName != nil { + i -= len(*m.ExtendedResourceName) + copy(dAtA[i:], *m.ExtendedResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExtendedResourceName))) + i-- + dAtA[i] = 0x22 + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DistinctAttribute != nil { + i -= len(*m.DistinctAttribute) + copy(dAtA[i:], *m.DistinctAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DistinctAttribute))) + i-- + dAtA[i] = 0x1a + } + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceCounterConsumption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceCounterConsumption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Counters) > 0 { + keysForCounters := make([]string, 0, len(m.Counters)) + for k := range m.Counters { + keysForCounters = append(keysForCounters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Counters[string(keysForCounters[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCounters[iNdEx]) + copy(dAtA[i:], keysForCounters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CounterSet) + copy(dAtA[i:], m.CounterSet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CounterSet))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FirstAvailable) > 0 { + for iNdEx := len(m.FirstAvailable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FirstAvailable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Exactly != nil { + { + size, err := m.Exactly.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConsumedCapacity) > 0 { + keysForConsumedCapacity := make([]string, 0, len(m.ConsumedCapacity)) + for k := range m.ConsumedCapacity { + keysForConsumedCapacity = append(keysForConsumedCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity) + for iNdEx := len(keysForConsumedCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.ConsumedCapacity[QualifiedName(keysForConsumedCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForConsumedCapacity[iNdEx]) + copy(dAtA[i:], keysForConsumedCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(keysForConsumedCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.ShareID != nil { + i -= len(*m.ShareID) + copy(dAtA[i:], *m.ShareID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID))) + i-- + dAtA[i] = 0x4a + } + if len(m.BindingFailureConditions) > 0 { + for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingFailureConditions[iNdEx]) + copy(dAtA[i:], m.BindingFailureConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.BindingConditions) > 0 { + for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingConditions[iNdEx]) + copy(dAtA[i:], m.BindingConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x22 + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x1a + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceSubRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSubRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSubRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Capacity != nil { + { + size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
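+ // At this point i marks the first byte of the toleration message that was
+ // just written (these generated marshalers fill dAtA back to front); the
+ // next statements prepend its varint-encoded length and then the tag byte
+ // 0x32, i.e. m.Tolerations is field number 6 with wire type 2 (length-delimited).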
+ i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceTaint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x1a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceToleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceToleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceToleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 + } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExactDeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExactDeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
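+ // Like every generated type in this file, ExactDeviceRequest pairs Size
+ // (exact encoded length) with MarshalToSizedBuffer (reverse, back-to-front
+ // fill), so Marshal and MarshalTo serialize with a single allocation.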
+} + +func (m *ExactDeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Capacity != nil { + { + size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x20 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x1a + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.HardwareAddress) + copy(dAtA[i:], m.HardwareAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) + i-- + dAtA[i] = 0x1a + if len(m.IPs) > 0 { + for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPs[iNdEx]) + copy(dAtA[i:], m.IPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.InterfaceName) + copy(dAtA[i:], m.InterfaceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := 
[Machine-generated gogo-protobuf serialization code for the Kubernetes resource (DRA) API package. This hunk adds the Marshal/MarshalTo/MarshalToSizedBuffer, Size, String, and Unmarshal methods for AllocatedDeviceStatus, AllocationResult, CELDeviceSelector, CapacityRequestPolicy, CapacityRequestPolicyRange, CapacityRequirements, Counter, CounterSet, Device, DeviceAllocationConfiguration, DeviceAllocationResult, DeviceAttribute, DeviceCapacity, DeviceClaim, DeviceClaimConfiguration, DeviceClass, DeviceClassConfiguration, DeviceClassList, DeviceClassSpec, DeviceConfiguration, DeviceConstraint, DeviceCounterConsumption, DeviceRequest, DeviceRequestAllocationResult, DeviceSelector, DeviceSubRequest, DeviceTaint, DeviceToleration, ExactDeviceRequest, NetworkDeviceData, OpaqueDeviceConfiguration, ResourceClaim, ResourceClaimConsumerReference, ResourceClaimList, ResourceClaimSpec, ResourceClaimStatus, ResourceClaimTemplate, ResourceClaimTemplateList, ResourceClaimTemplateSpec, ResourcePool, ResourceSlice, ResourceSliceList, and ResourceSliceSpec, together with the shared helpers encodeVarintGenerated, sovGenerated, sozGenerated, and valueToStringGenerated.]
field Step", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Step == nil { + m.Step = &resource.Quantity{} + } + if err := m.Step.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CapacityRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CapacityRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CapacityRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requests == nil { + m.Requests = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Requests[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CounterSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CounterSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CounterSet: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counters == nil { + m.Counters = make(map[string]Counter) + } + var mapkey string + mapvalue := &Counter{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Counter{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Counters[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]DeviceCapacity) + } + var mapkey QualifiedName + mapvalue := &DeviceCapacity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceCapacity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[QualifiedName(mapkey)] = 
*mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsumesCounters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsumesCounters = append(m.ConsumesCounters, DeviceCounterConsumption{}) + if err := m.ConsumesCounters[len(m.ConsumesCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllNodes = &b + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, DeviceTaint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BindsToNode", wireType) + } + var v 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BindsToNode = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowMultipleAllocations", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowMultipleAllocations = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, 
DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceCapacity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceCapacity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceCapacity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestPolicy == nil { + m.RequestPolicy = &CapacityRequestPolicy{} + } + if err := m.RequestPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ExtendedResourceName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinctAttribute", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.DistinctAttribute = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceCounterConsumption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceCounterConsumption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceCounterConsumption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterSet", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CounterSet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counters == nil { + m.Counters = make(map[string]Counter) + } + var mapkey string + mapvalue := &Counter{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Counter{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Counters[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exactly", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exactly == nil { + m.Exactly = &ExactDeviceRequest{} + } + if err := m.Exactly.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FirstAvailable = append(m.FirstAvailable, DeviceSubRequest{}) + if err := m.FirstAvailable[len(m.FirstAvailable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, DeviceToleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.ShareID = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsumedCapacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsumedCapacity == nil { + m.ConsumedCapacity = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ConsumedCapacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSubRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSubRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSubRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, DeviceToleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = &CapacityRequirements{} + } + if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceTaint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeAdded == nil { + m.TimeAdded = &v1.Time{} + } + if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceToleration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceToleration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceToleration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = DeviceTolerationOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TolerationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExactDeviceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExactDeviceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExactDeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + 
m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, DeviceToleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = &CapacityRequirements{} + } + if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InterfaceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HardwareAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReservedFor = append(m.ReservedFor, 
ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, AllocatedDeviceStatus{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType) + } + m.ResourceSliceCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.ResourceSliceCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllNodes = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerDeviceNodeSelection", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PerDeviceNodeSelection = &b + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SharedCounters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SharedCounters = append(m.SharedCounters, CounterSet{}) + if err := m.SharedCounters[len(m.SharedCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/e2e/vendor/k8s.io/api/resource/v1/generated.proto b/e2e/vendor/k8s.io/api/resource/v1/generated.proto new file mode 100644 index 00000000000..816a430c26b --- /dev/null +++ b/e2e/vendor/k8s.io/api/resource/v1/generated.proto @@ -0,0 +1,1589 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
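Every Unmarshal method in the generated code above inlines the same protobuf wire-format walk: decode a base-128 varint, split it into a field number (wire >> 3) and a wire type (wire & 0x7), then either decode a varint scalar or bounds-check a length-delimited payload before recursing. A minimal, self-contained sketch of that varint step; decodeUvarint is an illustrative helper for this note, not part of the patch:

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeUvarint reads one base-128 varint from b and returns the decoded
// value plus the number of bytes consumed. Each byte contributes its low
// seven bits; a high bit of 1 means another byte follows. The shift guard
// mirrors the ErrIntOverflowGenerated check in the generated code above.
func decodeUvarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(b) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 0x96 0x01 decodes to 150: 0x16 + (0x01 << 7) = 22 + 128.
	v, n, err := decodeUvarint([]byte{0x96, 0x01})
	fmt.Println(v, n, err) // prints: 150 2 <nil>
}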
+
+syntax = "proto2";
+
+package k8s.io.api.resource.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/resource/v1";
+
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+//
+// The combination of Driver, Pool, Device, and ShareID must match the corresponding key
+// in Status.Allocation.Devices.
+message AllocatedDeviceStatus {
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ optional string driver = 1;
+
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ optional string pool = 2;
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ optional string device = 3;
+
+ // ShareID uniquely identifies an individual allocation share of the device.
+ //
+ // +optional
+ // +featureGate=DRAConsumableCapacity
+ optional string shareID = 7;
+
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // Must not contain more than 8 entries.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4;
+
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5;
+
+ // NetworkData contains network-related information specific to the device.
+ //
+ // +optional
+ optional NetworkDeviceData networkData = 6;
+}
+
+// AllocationResult contains attributes of an allocated resource.
+message AllocationResult {
+ // Devices is the result of allocating devices.
+ //
+ // +optional
+ optional DeviceAllocationResult devices = 1;
+
+ // NodeSelector defines where the allocated resources are available. If
+ // unset, they are available everywhere.
+ //
+ // +optional
+ optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
+
+ // AllocationTimestamp stores the time when the resources were allocated.
+ // This field is not guaranteed to be set, in which case that time is unknown.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gate.
+ //
+ // +optional
+ // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time allocationTimestamp = 5;
+}
+
+// CELDeviceSelector contains a CEL expression for selecting a device.
+message CELDeviceSelector {
+ // Expression is a CEL expression which evaluates a single device.
It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device + // (v1.34+ with the DRAConsumableCapacity feature enabled). + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + optional string expression = 1; +} + +// CapacityRequestPolicy defines how requests consume device capacity. +// +// Must not set more than one ValidRequestValues. +message CapacityRequestPolicy { + // Default specifies how much of this capacity is consumed by a request + // that does not contain an entry for it in DeviceRequest's Capacity. + // + // +optional + optional .k8s.io.apimachinery.pkg.api.resource.Quantity default = 1; + + // ValidValues defines a set of acceptable quantity values in consuming requests. + // + // Must not contain more than 10 entries. + // Must be sorted in ascending order. + // + // If this field is set, + // Default must be defined and it must be included in ValidValues list. + // + // If the requested amount does not match any valid value but smaller than some valid values, + // the scheduler calculates the smallest valid value that is greater than or equal to the request. 
+ // That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues).
+ //
+ // If the requested amount exceeds all valid values, the request violates the policy,
+ // and this device cannot be allocated.
+ //
+ // +optional
+ // +listType=atomic
+ // +oneOf=ValidRequestValues
+ repeated .k8s.io.apimachinery.pkg.api.resource.Quantity validValues = 3;
+
+ // ValidRange defines an acceptable quantity value range in consuming requests.
+ //
+ // If this field is set,
+ // Default must be defined and it must fall within the defined ValidRange.
+ //
+ // If the requested amount does not fall within the defined range, the request violates the policy,
+ // and this device cannot be allocated.
+ //
+ // If the request doesn't contain this capacity entry, Default value is used.
+ //
+ // +optional
+ // +oneOf=ValidRequestValues
+ optional CapacityRequestPolicyRange validRange = 4;
+}
+
+// CapacityRequestPolicyRange defines a valid range for consumable capacity values.
+//
+// - If the requested amount is less than Min, it is rounded up to the Min value.
+// - If Step is set and the requested amount is between Min and Max but not aligned with Step,
+// it will be rounded up to the next value equal to Min + (n * Step).
+// - If Step is not set, the requested amount is used as-is if it falls within the range Min to Max (if set).
+// - If the requested or rounded amount exceeds Max (if set), the request does not satisfy the policy,
+// and the device cannot be allocated.
+message CapacityRequestPolicyRange {
+ // Min specifies the minimum capacity allowed for a consumption request.
+ //
+ // Min must be greater than or equal to zero,
+ // and less than or equal to the capacity value.
+ // requestPolicy.default must be more than or equal to the minimum.
+ //
+ // +required
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity min = 1;
+
+ // Max defines the upper limit for capacity that can be requested.
+ //
+ // Max must be less than or equal to the capacity value.
+ // Min and requestPolicy.default must be less than or equal to the maximum.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity max = 2;
+
+ // Step defines the step size between valid capacity amounts within the range.
+ //
+ // Max (if set) and requestPolicy.default must be a multiple of Step.
+ // Min + Step must be less than or equal to the capacity value.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity step = 3;
+}
+
+// CapacityRequirements defines the capacity requirements for a specific device request.
+message CapacityRequirements {
+ // Requests represent individual device resource requests for distinct resources,
+ // all of which must be provided by the device.
+ //
+ // This value is used as an additional filtering condition against the available capacity on the device.
+ // This is semantically equivalent to a CEL selector with
+ // `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`.
+ // For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0.
+ //
+ // When a requestPolicy is defined, the requested amount is adjusted upward
+ // to the nearest valid value based on the policy.
+ // If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows—
+ // the device is considered ineligible for allocation.
+ //
+ // For any capacity that is not explicitly requested:
+ // - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity
+ // (i.e., the whole device is claimed).
+ // - If a requestPolicy is set, the default consumed capacity is determined according to that policy.
+ //
+ // If the device allows multiple allocations,
+ // the aggregated amount across all requests must not exceed the capacity value.
+ // The consumed capacity, which may be adjusted based on the requestPolicy if defined,
+ // is recorded in the resource claim’s status.devices[*].consumedCapacity field.
+ //
+ // +optional
+ map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 1;
+}
+
+// Counter describes a quantity associated with a device.
+message Counter {
+ // Value defines how much of a certain device counter is available.
+ //
+ // +required
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+}
+
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourceSlice.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
+message CounterSet {
+ // Name defines the name of the counter set.
+ // It must be a DNS label.
+ //
+ // +required
+ optional string name = 1;
+
+ // Counters defines the set of counters for this CounterSet.
+ // The name of each counter must be unique in that set and must be a DNS label.
+ //
+ // The maximum number of counters in all sets is 32.
+ //
+ // +required
+ map<string, Counter> counters = 2;
+}
+
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
+message Device {
+ // Name is a unique identifier among all devices managed by
+ // the driver in the pool. It must be a DNS label.
+ //
+ // +required
+ optional string name = 1;
+
+ // Attributes defines the set of attributes for this device.
+ // The name of each attribute must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ //
+ // +optional
+ map<string, DeviceAttribute> attributes = 2;
+
+ // Capacity defines the set of capacities for this device.
+ // The name of each capacity must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ //
+ // +optional
+ map<string, DeviceCapacity> capacity = 3;
+
+ // ConsumesCounters defines a list of references to sharedCounters
+ // and the set of counters that the device will
+ // consume from those counter sets.
+ //
+ // There can only be a single entry per counterSet.
+ //
+ // The total number of device counter consumption entries
+ // must be <= 32. In addition, the total number in the
+ // entire ResourceSlice must be <= 1024 (for example,
+ // 64 devices with 16 counters each).
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRAPartitionableDevices
+ repeated DeviceCounterConsumption consumesCounters = 4;
+
+ // NodeName identifies the node where the device is available.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ //
+ // +optional
+ // +oneOf=DeviceNodeSelection
+ // +featureGate=DRAPartitionableDevices
+ optional string nodeName = 5;
+
+ // NodeSelector defines the nodes where the device is available.
+ //
+ // Must use exactly one term.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set. + // + // +optional + // +oneOf=DeviceNodeSelection + // +featureGate=DRAPartitionableDevices + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 6; + + // AllNodes indicates that all nodes have access to the device. + // + // Must only be set if Spec.PerDeviceNodeSelection is set to true. + // At most one of NodeName, NodeSelector and AllNodes can be set. + // + // +optional + // +oneOf=DeviceNodeSelection + // +featureGate=DRAPartitionableDevices + optional bool allNodes = 7; + + // If specified, these are the driver-defined taints. + // + // The maximum number of taints is 4. + // + // This is an alpha field and requires enabling the DRADeviceTaints + // feature gate. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceTaints + repeated DeviceTaint taints = 8; + + // BindsToNode indicates if the usage of an allocation involving this device + // has to be limited to exactly the node that was chosen when allocating the claim. + // If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector + // to match the node where the allocation was made. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. + // + // +optional + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + optional bool bindsToNode = 9; + + // BindingConditions defines the conditions for proceeding with binding. + // All of these conditions must be set in the per-device status + // conditions with a value of True to proceed with binding the pod to the node + // while scheduling the pod. + // + // The maximum number of binding conditions is 4. + // + // The conditions must be a valid condition type string. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + repeated string bindingConditions = 10; + + // BindingFailureConditions defines the conditions for binding failure. + // They may be set in the per-device status conditions. + // If any is set to "True", a binding failure occurred. + // + // The maximum number of binding failure conditions is 4. + // + // The conditions must be a valid condition type string. + // + // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus + // feature gates. + // + // +optional + // +listType=atomic + // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus + repeated string bindingFailureConditions = 11; + + // AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests. + // + // If AllowMultipleAllocations is set to true, the device can be allocated more than once, + // and all of its capacity is consumable, regardless of whether the requestPolicy is defined or not. + // + // +optional + // +featureGate=DRAConsumableCapacity + optional bool allowMultipleAllocations = 12; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. 
+ //
+ // +required
+ optional string source = 1;
+
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the configuration applies to all subrequests.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string requests = 2;
+
+ optional DeviceConfiguration deviceConfiguration = 3;
+}
+
+// DeviceAllocationResult is the result of allocating devices.
+message DeviceAllocationResult {
+ // Results lists all allocated devices.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceRequestAllocationResult results = 1;
+
+ // This field is a combination of all the claim and class configuration parameters.
+ // Drivers can distinguish between those based on a flag.
+ //
+ // This includes configuration parameters for drivers which have no allocated
+ // devices in the result because it is up to the drivers which configuration
+ // parameters they support. They can silently ignore unknown configuration
+ // parameters.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceAllocationConfiguration config = 2;
+}
+
+// DeviceAttribute must have exactly one field set.
+message DeviceAttribute {
+ // IntValue is a number.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional int64 int = 2;
+
+ // BoolValue is a true/false value.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional bool bool = 3;
+
+ // StringValue is a string. Must not be longer than 64 characters.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional string string = 4;
+
+ // VersionValue is a semantic version according to semver.org spec 2.0.0.
+ // Must not be longer than 64 characters.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional string version = 5;
+}
+
+// DeviceCapacity describes a quantity associated with a device.
+message DeviceCapacity {
+ // Value defines how much of a certain capacity that device has.
+ //
+ // This field reflects the fixed total capacity and does not change.
+ // The consumed amount is tracked separately by scheduler
+ // and does not affect this value.
+ //
+ // +required
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+
+ // RequestPolicy defines how this DeviceCapacity must be consumed
+ // when the device is allowed to be shared by multiple allocations.
+ //
+ // The Device must have allowMultipleAllocations set to true in order to set a requestPolicy.
+ //
+ // If unset, capacity requests are unconstrained:
+ // requests can consume any amount of capacity, as long as the total consumed
+ // across all allocations does not exceed the device's defined capacity.
+ // If request is also unset, default is the full capacity value.
+ //
+ // +optional
+ // +featureGate=DRAConsumableCapacity
+ optional CapacityRequestPolicy requestPolicy = 2;
+}
+
+// DeviceClaim defines how to request devices with a ResourceClaim.
+message DeviceClaim {
+ // Requests represent individual requests for distinct devices which
+ // must all be satisfied. If empty, nothing needs to be allocated.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceRequest requests = 1;
+
+ // These constraints must be satisfied by the set of devices that get
+ // allocated for the claim.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceConstraint constraints = 2;
+
+ // This field holds configuration for multiple potential drivers which
+ // could satisfy requests in this claim. It is ignored while allocating
+ // the claim.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceClaimConfiguration config = 3;
+}
+
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
+message DeviceClaimConfiguration {
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the configuration applies to all subrequests.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string requests = 1;
+
+ optional DeviceConfiguration deviceConfiguration = 2;
+}
+
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+message DeviceClass {
+ // Standard object metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines what can be allocated and how to configure it.
+ //
+ // This is mutable. Consumers have to be prepared for classes changing
+ // at any time, either because they get updated or replaced. Claim
+ // allocations are done once based on whatever was set in classes at
+ // the time of allocation.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ optional DeviceClassSpec spec = 2;
+}
+
+// DeviceClassConfiguration is used in DeviceClass.
+message DeviceClassConfiguration {
+ optional DeviceConfiguration deviceConfiguration = 1;
+}
+
+// DeviceClassList is a collection of classes.
+message DeviceClassList {
+ // Standard list metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of resource classes.
+ repeated DeviceClass items = 2;
+}
+
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
+message DeviceClassSpec {
+ // Each selector must be satisfied by a device which is claimed via this class.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceSelector selectors = 1;
+
+ // Config defines configuration parameters that apply to each device that is claimed via this class.
+ // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+ // configuration applies to exactly one driver.
+ //
+ // They are passed to the driver, but are not considered while allocating the claim.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceClassConfiguration config = 2;
+
+ // ExtendedResourceName is the extended resource name for the devices of this class.
+ // The devices of this class can be used to satisfy a pod's extended resource requests.
+ // It has the same format as the name of a pod's extended resource.
+ // It should be unique among all the device classes in a cluster.
+ // If two device classes have the same name, then the class created later
+ // is picked to satisfy a pod's extended resource requests.
+ // If two classes are created at the same time, then the name of the class
+ // lexicographically sorted first is picked.
+ //
+ // This is an alpha field.
+ // +optional
+ // +featureGate=DRAExtendedResource
+ optional string extendedResourceName = 4;
+}
+
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
+message DeviceConfiguration {
+ // Opaque provides driver-specific configuration parameters.
+ //
+ // +optional
+ // +oneOf=ConfigurationType
+ optional OpaqueDeviceConfiguration opaque = 1;
+}
+
+// DeviceConstraint must have exactly one field set besides Requests.
+message DeviceConstraint {
+ // Requests is a list of the one or more requests in this claim which
+ // must co-satisfy this constraint. If a request is fulfilled by
+ // multiple devices, then all of the devices must satisfy the
+ // constraint. If this is not specified, this constraint applies to all
+ // requests in this claim.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the constraint applies to all subrequests.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string requests = 1;
+
+ // MatchAttribute requires that all devices in question have this
+ // attribute and that its type and value are the same across those
+ // devices.
+ //
+ // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+ // then only devices in the same NUMA node will be chosen. A device which
+ // does not have that attribute will not be chosen. All devices should
+ // use a value of the same type for this attribute because that is part of
+ // its specification, but if one device doesn't, then it also will not be
+ // chosen.
+ //
+ // Must include the domain qualifier.
+ //
+ // +optional
+ // +oneOf=ConstraintType
+ optional string matchAttribute = 2;
+
+ // DistinctAttribute requires that all devices in question have this
+ // attribute and that its type and value are unique across those devices.
+ //
+ // This acts as the inverse of MatchAttribute.
+ //
+ // This constraint is used to avoid allocating multiple requests to the same device
+ // by ensuring attribute-level differentiation.
+ //
+ // This is useful for scenarios where resource requests must be fulfilled by separate physical devices.
+ // For example, a container requests two network interfaces that must be allocated from two different physical NICs.
+ //
+ // +optional
+ // +oneOf=ConstraintType
+ // +featureGate=DRAConsumableCapacity
+ optional string distinctAttribute = 3;
+}
+
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
+message DeviceCounterConsumption {
+ // CounterSet is the name of the set from which the
+ // counters defined will be consumed.
+ //
+ // +required
+ optional string counterSet = 1;
+
+ // Counters defines the counters that will be consumed by the device.
+ //
+ // The maximum number of counters in a device is 32.
+ // In addition, the maximum number of all counters
+ // in all devices is 1024 (for example, 64 devices with
+ // 16 counters each).
+ //
+ // +required
+ map<string, Counter> counters = 2;
+}
+
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices. With FirstAvailable it is also
+// possible to provide a prioritized list of requests.
+message DeviceRequest {
+ // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+ // entry and in a constraint of the claim.
+ //
+ // References using the name in the DeviceRequest will uniquely
+ // identify a request when the Exactly field is set. When the
+ // FirstAvailable field is set, a reference to the name of the
+ // DeviceRequest will match whatever subrequest is chosen by the
+ // scheduler.
+ //
+ // Must be a DNS label.
+ //
+ // +required
+ optional string name = 1;
+
+ // Exactly specifies the details for a single request that must
+ // be met exactly for the request to be satisfied.
+ //
+ // One of Exactly or FirstAvailable must be set.
+ //
+ // +optional
+ // +oneOf=deviceRequestType
+ optional ExactDeviceRequest exactly = 2;
+
+ // FirstAvailable contains subrequests, of which exactly one will be
+ // selected by the scheduler. It tries to
+ // satisfy them in the order in which they are listed here. So if
+ // there are two entries in the list, the scheduler will only check
+ // the second one if it determines that the first one can not be used.
+ //
+ // DRA does not yet implement scoring, so the scheduler will
+ // select the first set of devices that satisfies all the
+ // requests in the claim. And if the requirements can
+ // be satisfied on more than one node, other scheduling features
+ // will determine which node is chosen. This means that the set of
+ // devices allocated to a claim might not be the optimal set
+ // available to the cluster. Scoring will be implemented later.
+ //
+ // +optional
+ // +oneOf=deviceRequestType
+ // +listType=atomic
+ // +featureGate=DRAPrioritizedList
+ repeated DeviceSubRequest firstAvailable = 3;
+}
+
+// DeviceRequestAllocationResult contains the allocation result for one request.
+message DeviceRequestAllocationResult {
+ // Request is the name of the request in the claim which caused this
+ // device to be allocated. If it references a subrequest in the
+ // firstAvailable list on a DeviceRequest, this field must
+ // include both the name of the main request and the subrequest
+ // using the format <main request>/<subrequest>.
+ //
+ // Multiple devices may have been allocated per request.
+ //
+ // +required
+ optional string request = 1;
+
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ optional string driver = 2;
+
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ optional string pool = 3;
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ optional string device = 4;
+
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
+ // +optional
+ // +featureGate=DRAAdminAccess
+ optional bool adminAccess = 5;
+
+ // A copy of all tolerations specified in the request at the time
+ // when the device got allocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRADeviceTaints
+ repeated DeviceToleration tolerations = 6;
+
+ // BindingConditions contains a copy of the BindingConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ repeated string bindingConditions = 7;
+
+ // BindingFailureConditions contains a copy of the BindingFailureConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ //
+ // +optional
+ // +listType=atomic
+ // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ repeated string bindingFailureConditions = 8;
+
+ // ShareID uniquely identifies an individual allocation share of the device,
+ // used when the device supports multiple simultaneous allocations.
+ // It serves as an additional map key to differentiate concurrent shares
+ // of the same device.
+ //
+ // +optional
+ // +featureGate=DRAConsumableCapacity
+ optional string shareID = 9;
+
+ // ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
+ // The consumed amount may differ from the requested amount: it is rounded up to the nearest valid
+ // value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount).
+ //
+ // The total consumed capacity for each device must not exceed the DeviceCapacity's Value.
+ //
+ // This field is populated only for devices that allow multiple allocations.
+ // All capacity entries are included, even if the consumed amount is zero.
// DeviceSelector must have exactly one field set.
message DeviceSelector {
  // CEL contains a CEL expression for selecting a device.
  //
  // +optional
  // +oneOf=SelectorType
  optional CELDeviceSelector cel = 1;
}
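The CELDeviceSelector declared elsewhere in this file carries the actual expression; the CEL environment exposes the candidate device's driver name, attributes, and capacity. A hedged Go sketch of building one such selector, where the gpu.example.com driver and its "memory" capacity entry are assumptions for illustration:

```go
// gpuSelector matches devices published by a hypothetical gpu.example.com
// driver that expose at least 8Gi of a "memory" capacity. Only devices for
// which the expression evaluates to true are considered for the request.
func gpuSelector() resourceapi.DeviceSelector {
	return resourceapi.DeviceSelector{
		CEL: &resourceapi.CELDeviceSelector{
			Expression: `device.driver == "gpu.example.com" &&` +
				` device.capacity["gpu.example.com"].memory.compareTo(quantity("8Gi")) >= 0`,
		},
	}
}
```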
// DeviceSubRequest describes a request for device provided in the
// claim.spec.devices.requests[].firstAvailable array. Each
// is typically a request for a single resource like a device, but can
// also ask for several identical devices.
//
// DeviceSubRequest is similar to ExactDeviceRequest, but doesn't expose the
// AdminAccess field as that one is only supported when requesting a
// specific device.
message DeviceSubRequest {
  // Name can be used to reference this subrequest in the list of constraints
  // or the list of configurations for the claim. References must use the
  // format <main request>/<subrequest>.
  //
  // Must be a DNS label.
  //
  // +required
  optional string name = 1;

  // DeviceClassName references a specific DeviceClass, which can define
  // additional configuration and selectors to be inherited by this
  // subrequest.
  //
  // A class is required. Which classes are available depends on the cluster.
  //
  // Administrators may use this to restrict which devices may get
  // requested by only installing classes with selectors for permitted
  // devices. If users are free to request anything without restrictions,
  // then administrators can create an empty DeviceClass for users
  // to reference.
  //
  // +required
  optional string deviceClassName = 2;

  // Selectors define criteria which must be satisfied by a specific
  // device in order for that device to be considered for this
  // subrequest. All selectors must be satisfied for a device to be
  // considered.
  //
  // +optional
  // +listType=atomic
  repeated DeviceSelector selectors = 3;

  // AllocationMode and its related fields define how devices are allocated
  // to satisfy this subrequest. Supported values are:
  //
  // - ExactCount: This request is for a specific number of devices.
  //   This is the default. The exact number is provided in the
  //   count field.
  //
  // - All: This subrequest is for all of the matching devices in a pool.
  //   Allocation will fail if some devices are already allocated,
  //   unless adminAccess is requested.
  //
  // If AllocationMode is not specified, the default mode is ExactCount. If
  // the mode is ExactCount and count is not specified, the default count is
  // one. Any other subrequests must specify this field.
  //
  // More modes may get added in the future. Clients must refuse to handle
  // requests with unknown modes.
  //
  // +optional
  optional string allocationMode = 4;

  // Count is used only when the count mode is "ExactCount". Must be greater than zero.
  // If AllocationMode is ExactCount and this field is not specified, the default is one.
  //
  // +optional
  // +oneOf=AllocationMode
  optional int64 count = 5;

  // If specified, the request's tolerations.
  //
  // Tolerations for NoSchedule are required to allocate a
  // device which has a taint with that effect. The same applies
  // to NoExecute.
  //
  // In addition, should any of the allocated devices get tainted
  // with NoExecute after allocation and that effect is not tolerated,
  // then all pods consuming the ResourceClaim get deleted to evict
  // them. The scheduler will not let new pods reserve the claim while
  // it has these tainted devices. Once all pods are evicted, the
  // claim will get deallocated.
  //
  // The maximum number of tolerations is 16.
  //
  // This is an alpha field and requires enabling the DRADeviceTaints
  // feature gate.
  //
  // +optional
  // +listType=atomic
  // +featureGate=DRADeviceTaints
  repeated DeviceToleration tolerations = 6;

  // Capacity defines resource requirements against each capacity.
  //
  // If this field is unset and the device supports multiple allocations,
  // the default value will be applied to each capacity according to requestPolicy.
  // For the capacity that has no requestPolicy, default is the full capacity value.
  //
  // Applies to each device allocation.
  // If Count > 1,
  // the request fails if there aren't enough devices that meet the requirements.
  // If AllocationMode is set to All,
  // the request fails if there are devices that otherwise match the request,
  // and have this capacity, with a value >= the requested amount, but which
  // cannot be allocated to this request.
  //
  // +optional
  // +featureGate=DRAConsumableCapacity
  optional CapacityRequirements capacity = 7;
}
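The tolerations field above is populated with the DeviceToleration message defined next. As a hedged sketch, assuming the generated Go constants for this API version exist under the names used here (DeviceTolerationOpExists, DeviceTaintEffectNoExecute) and using a hypothetical taint key:

```go
import (
	resourceapi "k8s.io/api/resource/v1"
	"k8s.io/utils/ptr"
)

// maintenanceToleration tolerates a hypothetical NoExecute maintenance
// taint for five minutes: pods consuming the claim keep running for that
// long after the taint appears, then get evicted as described above.
func maintenanceToleration() resourceapi.DeviceToleration {
	return resourceapi.DeviceToleration{
		Key:               "example.com/maintenance", // hypothetical taint key
		Operator:          resourceapi.DeviceTolerationOpExists, // match any value
		Effect:            resourceapi.DeviceTaintEffectNoExecute,
		TolerationSeconds: ptr.To[int64](300),
	}
}
```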
// The device this taint is attached to has the "effect" on
// any claim which does not tolerate the taint and, through the claim,
// to pods using the claim.
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message DeviceTaint {
  // The taint key to be applied to a device.
  // Must be a label name.
  //
  // +required
  optional string key = 1;

  // The taint value corresponding to the taint key.
  // Must be a label value.
  //
  // +optional
  optional string value = 2;

  // The effect of the taint on claims that do not tolerate the taint
  // and through such claims on the pods using them.
  // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
  // nodes is not valid here.
  //
  // +required
  optional string effect = 3;

  // TimeAdded represents the time at which the taint was added.
  // Added automatically during create or update if not set.
  //
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
}

// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
message DeviceToleration {
  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
  // Must be a label name.
  //
  // +optional
  optional string key = 1;

  // Operator represents a key's relationship to the value.
  // Valid operators are Exists and Equal. Defaults to Equal.
  // Exists is equivalent to wildcard for value, so that a ResourceClaim can
  // tolerate all taints of a particular category.
  //
  // +optional
  // +default="Equal"
  optional string operator = 2;

  // Value is the taint value the toleration matches to.
  // If the operator is Exists, the value must be empty, otherwise just a regular string.
  // Must be a label value.
  //
  // +optional
  optional string value = 3;

  // Effect indicates the taint effect to match. Empty means match all taint effects.
  // When specified, allowed values are NoSchedule and NoExecute.
  //
  // +optional
  optional string effect = 4;

  // TolerationSeconds represents the period of time the toleration (which must be
  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
  // it is not set, which means tolerate the taint forever (do not evict). Zero and
  // negative values will be treated as 0 (evict immediately) by the system.
  // If larger than zero, the time when the pod needs to be evicted is calculated as